clang API Documentation — generated source listing of lib/CodeGen/CGExprScalar.cpp
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
/// Bundled operands for a binary operation that is about to be emitted:
/// the already-emitted LLVM operands plus the AST-level information the
/// emission helpers need (computation type, opcode, originating expr).
struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  bool FPContractable;           // May this FP op be contracted (e.g. into fma)?
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// Emits an expression with scalar LLVM type as LLVM IR, via StmtVisitor
/// dispatch over the AST node kinds.  Each Visit* returns the llvm::Value*
/// holding the expression's scalar result (or nullptr for void results).
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  // When set, the result of the next visited assignment may be discarded
  // (checked-and-cleared via TestAndClearIgnoreResultAssign).
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// Return the current ignore-result flag and reset it, so that the flag
  /// applies to at most one visited expression.
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerKind>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  /// If E (or its declared type) carries an align_value attribute, emit an
  /// llvm.assume-style alignment assumption for the loaded value V.
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
            dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here.
        if (isa<ParmVarDecl>(VD))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    // Fall back to an attribute on the expression's typedef type, if any.
    if (!AVAttr)
      if (const auto *TTy =
          dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// \brief Emit a check that a conversion to or from a floating-point type
  /// does not overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType,
                                QualType DstType, llvm::Type *DstTy);

  /// EmitScalarConversion - Emit a conversion from the specified type to the
  /// specified destination type, both of which are LLVM scalar types.
  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);

  /// EmitComplexToScalarConversion - Emit a conversion from the specified
  /// complex type to the specified destination type, where the destination type
  /// is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V) {
    Value *Zero = llvm::ConstantPointerNull::get(
                                      cast<llvm::PointerType>(V->getType()));
    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.  Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Prefer a constant if the reference folds to one (e.g. enumerators).
    if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
      if (result.isReference())
        return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
                                E->getExprLoc());
      return result.getValue();
    }
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    // A message returning a reference yields an l-value; load through it.
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    // Explicit casts may introduce a VLA type; make sure its size is emitted.
    if (E->getType()->isVariablyModifiedType())
      CGF.EmitVariablyModifiedType(E->getType());

    if (CGDebugInfo *DI = CGF.getDebugInfo())
      DI->EmitExplicitCastType(E->getType());

    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
                                               llvm::Value *InVal,
                                               llvm::Value *NextVal,
                                               bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getAddress();
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E) {
    CGF.enterFullExpression(E);
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    return Visit(E->getSubExpr());
  }
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        // Fall through.
      case LangOptions::SOB_Trapping:
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy())
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
                     unsigned SICmpOpc, unsigned FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  // Member pointers have an ABI-specific null representation; delegate.
  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src);
}

// Emit the -fsanitize=float-cast-overflow check: compute (at compile time)
// the source-type range that converts without overflow into the destination
// type, emit a runtime comparison of Src against that range, and hand the
// resulting i1 to CGF.EmitCheck.  Returns early (no check emitted) when the
// destination can represent every source value.
void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
                                                 QualType OrigSrcType,
                                                 Value *Src, QualType SrcType,
                                                 QualType DstType,
                                                 llvm::Type *DstTy) {
  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Type *SrcTy = Src->getType();

  llvm::Value *Check = nullptr;
  if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
    // Integer to floating-point. This can fail for unsigned short -> __half
    // or unsigned __int128 -> float.
    assert(DstType->isFloatingType());
    bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();

    APFloat LargestFloat =
      APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
    APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);

    bool IsExact;
    if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
                                      &IsExact) != APFloat::opOK)
      // The range of representable values of this floating point type includes
      // all values of this integer type. Don't need an overflow check.
      return;

    llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
    if (SrcIsUnsigned)
      Check = Builder.CreateICmpULE(Src, Max);
    else {
      // Signed range is symmetric: [-Max, Max].
      llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
      llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
      llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
      Check = Builder.CreateAnd(GE, LE);
    }
  } else {
    const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);
    if (isa<llvm::IntegerType>(DstTy)) {
      // Floating-point to integer. This has undefined behavior if the source is
      // +-Inf, NaN, or doesn't fit into the destination type (after truncation
      // to an integer).
      unsigned Width = CGF.getContext().getIntWidth(DstType);
      bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

      APSInt Min = APSInt::getMinValue(Width, Unsigned);
      APFloat MinSrc(SrcSema, APFloat::uninitialized);
      if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
          APFloat::opOverflow)
        // Don't need an overflow check for lower bound. Just check for
        // -Inf/NaN.
        MinSrc = APFloat::getInf(SrcSema, true);
      else
        // Find the largest value which is too small to represent (before
        // truncation toward zero).
        MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

      APSInt Max = APSInt::getMaxValue(Width, Unsigned);
      APFloat MaxSrc(SrcSema, APFloat::uninitialized);
      if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
          APFloat::opOverflow)
        // Don't need an overflow check for upper bound. Just check for
        // +Inf/NaN.
        MaxSrc = APFloat::getInf(SrcSema, false);
      else
        // Find the smallest value which is too large to represent (before
        // truncation toward zero).
        MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

      // If we're converting from __half, convert the range to float to match
      // the type of src.
      if (OrigSrcType->isHalfType()) {
        const llvm::fltSemantics &Sema =
          CGF.getContext().getFloatTypeSemantics(SrcType);
        bool IsInexact;
        MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
        MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
      }

      // Strict inequalities: NaN compares false, so NaN also fails the check.
      llvm::Value *GE =
        Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
      Check = Builder.CreateAnd(GE, LE);
    } else {
      // FIXME: Maybe split this sanitizer out from float-cast-overflow.
      //
      // Floating-point to floating-point. This has undefined behavior if the
      // source is not in the range of representable values of the destination
      // type. The C and C++ standards are spectacularly unclear here. We
      // diagnose finite out-of-range conversions, but allow infinities and NaNs
      // to convert to the corresponding value in the smaller type.
      //
      // C11 Annex F gives all such conversions defined behavior for IEC 60559
      // conforming implementations. Unfortunately, LLVM's fptrunc instruction
      // does not.

      // Converting from a lower rank to a higher rank can never have
      // undefined behavior, since higher-rank types must have a superset
      // of values of lower-rank types.
      if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
        return;

      assert(!OrigSrcType->isHalfType() &&
             "should not check conversion from __half, it has the lowest rank");

      const llvm::fltSemantics &DstSema =
        CGF.getContext().getFloatTypeSemantics(DstType);
      APFloat MinBad = APFloat::getLargest(DstSema, false);
      APFloat MaxBad = APFloat::getInf(DstSema, false);

      bool IsInexact;
      MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
      MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);

      // Check |Src| against (MinBad, MaxBad); values at or beyond MaxBad
      // (i.e. Inf/NaN) are allowed, hence the CreateNot of the in-range test.
      Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
      llvm::Value *GE =
        Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
      Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
    }
  }

  // FIXME: Provide a SourceLocation.
  llvm::Constant *StaticArgs[] = {
    CGF.EmitCheckTypeDescriptor(OrigSrcType),
    CGF.EmitCheckTypeDescriptor(DstType)
  };
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                "float_cast_overflow", StaticArgs, OrigSrc);
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType) {
  // Work on canonical types so typedefs don't defeat the equality checks.
  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  // Remember the pre-promotion value/type for the sanitizer check below.
  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // If casting to/from storage-only half FP, use special intrinsics.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
      !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
    // Widen __half to float first; the rest of the function converts from
    // float.
    Src = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                             CGF.CGM.FloatTy),
        Src);
    SrcType = CGF.getContext().FloatTy;
    SrcTy = CGF.FloatTy;
  }

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (isa<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.IntPtrTy;
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Cast the scalar to element type
    QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  // Allow bitcast from vector to integer/fp of the same size.
  if (isa<llvm::VectorType>(SrcTy) ||
      isa<llvm::VectorType>(DstTy))
    return Builder.CreateBitCast(Src, DstTy, "conv");

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
                             DstTy);

  // Cast to half via float
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
      !CGF.getContext().getLangOpts().HalfArgsAndReturns)
    DstTy = CGF.FloatTy;

  if (isa<llvm::IntegerType>(SrcTy)) {
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstTy)) {
    assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
           "Unknown real conversion");
    // NOTE(review): ordering of FP type IDs is relied on here to pick
    // trunc vs ext — lower type ID means narrower FP type.
    if (DstTy->getTypeID() < SrcTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  if (DstTy != ResTy) {
    // The destination was storage-only __half: narrow the float result back
    // to i16 via the fp16 intrinsic.
    assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
    Res = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
        Res);
  }

  return Res;
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
/// type to the specified destination type, where the destination type is an
/// LLVM scalar type.
00861 Value *ScalarExprEmitter:: 00862 EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, 00863 QualType SrcTy, QualType DstTy) { 00864 // Get the source element type. 00865 SrcTy = SrcTy->castAs<ComplexType>()->getElementType(); 00866 00867 // Handle conversions to bool first, they are special: comparisons against 0. 00868 if (DstTy->isBooleanType()) { 00869 // Complex != 0 -> (Real != 0) | (Imag != 0) 00870 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy); 00871 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy); 00872 return Builder.CreateOr(Src.first, Src.second, "tobool"); 00873 } 00874 00875 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, 00876 // the imaginary part of the complex value is discarded and the value of the 00877 // real part is converted according to the conversion rules for the 00878 // corresponding real type. 00879 return EmitScalarConversion(Src.first, SrcTy, DstTy); 00880 } 00881 00882 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { 00883 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty); 00884 } 00885 00886 /// \brief Emit a sanitization check for the given "binary" operation (which 00887 /// might actually be a unary increment which has been lowered to a binary 00888 /// operation). The check passes if all values in \p Checks (which are \c i1), 00889 /// are \c true. 
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerKind>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  // CheckName selects the UBSan runtime handler; StaticData (source location
  // + type descriptors) and DynamicData (operand values) become its arguments.
  StringRef CheckName;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  // A compound assignment (e.g. +=) is checked as its underlying operation.
  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary negation lowered to a binary op: only the RHS (the negated value)
    // is interesting to the runtime.
    CheckName = "negate_overflow";
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      CheckName = "shift_out_of_bounds";
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      CheckName = "divrem_overflow";
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: CheckName = "add_overflow"; break;
      case BO_Sub: CheckName = "sub_overflow"; break;
      case BO_Mul: CheckName = "mul_overflow"; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

// Fallback for expression kinds with no dedicated visitor: report the
// unsupported construct and produce undef so IR generation can continue.
Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}

Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case: __builtin_shufflevector with a runtime (vector) mask.
  // Two sub-exprs means (vec, mask); three with a vector third operand means
  // (vec1, vec2, mask).
  if (E->getNumSubExprs() == 2 ||
      (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    if (E->getNumSubExprs() == 3) {
      Mask = CGF.EmitScalarExpr(E->getExpr(2));

      // Shuffle LHS & RHS into one input vector.
      SmallVector<llvm::Constant*, 32> concat;
      for (unsigned i = 0; i != LHSElts; ++i) {
        concat.push_back(Builder.getInt32(2*i));
        concat.push_back(Builder.getInt32(2*i+1));
      }

      Value* CV = llvm::ConstantVector::get(concat);
      LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
      LHSElts *= 2;
    } else {
      Mask = RHS;
    }

    llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
    llvm::Constant* EltMask;

    // Keep each index within [0, next-power-of-2-of-LHSElts) so the dynamic
    // extracts below never read out of bounds.
    EltMask = llvm::ConstantInt::get(MTy->getElementType(),
                                     llvm::NextPowerOf2(LHSElts-1)-1);

    // Mask off the high bits of each shuffle index.
    Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
                                                     EltMask);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
                                                  MTy->getNumElements());
    Value* NewV = llvm::UndefValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  // Constant-mask case: indices are compile-time constants, so emit a single
  // shufflevector instruction.
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<llvm::Constant*, 32> indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnesValue())
      indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
    else
      indices.push_back(Builder.getInt32(Idx.getZExtValue()));
  }

  Value *SV = llvm::ConstantVector::get(indices);
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}

/// VisitConvertVectorExpr - Emit __builtin_convertvector: an element-wise
/// conversion between two vector types of the same element count.
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  // Canonicalize so sugar doesn't defeat the identity early-exit.
  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
           DstEltType = DstType->getAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
             *DstEltTy = DstTy->getVectorElementType();

  // Conversions to a boolean vector are comparisons against zero, element-wise.
  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float. Same dispatch as the scalar
  // case in EmitScalarConversion, but operating on whole vectors.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    // FP type IDs increase with width: a smaller destination ID truncates.
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}

Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  // If the member access folds to an integer constant (e.g. an enumerator),
  // emit the base only for its side effects and return the constant directly.
  llvm::APSInt Value;
  if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
    if (E->isArrow())
      CGF.EmitScalarExpr(E->getBase());
    else
      EmitLValue(E->getBase());
    return Builder.getInt(Value);
  }

  return EmitLoadOfLValue(E);
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's.  For most cases, this just
  // loads the lvalue formed by the subscript expr.  However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case.  The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

/// getMaskElt - Return shuffle mask index \p Idx of \p SVI, offset by \p Off,
/// as an i32 constant; a -1 (undefined) mask element becomes undef.
static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                                  unsigned Off, llvm::Type *I32Ty) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return llvm::UndefValue::get(I32Ty);
  return llvm::ConstantInt::get(I32Ty, Off+MV);
}

Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  unsigned ResElts = VType->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  // VIsUndefShuffle tracks whether V is currently a shuffle whose second
  // operand is undef, which lets later swizzle initializers merge into it.
  bool VIsUndefShuffle = false;
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<llvm::Constant*, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements.  If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            Args.push_back(C);
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
            Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            llvm::Constant *Mask = llvm::ConstantVector::get(Args);
            V = Builder.CreateShuffleVector(LHS, RHS, Mask);
            ++CurIdx;
            continue;
          }
        }
      }
      // Plain scalar element: fall back to insertelement.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = VVT->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
                                      CGF.Int32Ty));
          } else {
            Args.push_back(Builder.getInt32(j));
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
        Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
      llvm::Constant *Mask = llvm::ConstantVector::get(Args);
      Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                         Mask, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(Builder.getInt32(j));
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j+Offset));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    llvm::Constant *Mask = llvm::ConstantVector::get(Args);
    V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers: zero-fill any trailing elements the
  // init list did not cover.
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

/// ShouldNullCheckClassCastValue - Return whether a class-hierarchy cast must
/// guard against a null source pointer before adjusting it.
static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E)) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    // NOTE(review): this inspects CE itself (the cast being emitted), not the
    // subexpression E — confirm that is the intended check.
    if (ICE->getValueKind() != VK_RValue)
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();

  if (!DestTy->isVoidType())
    TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case.  The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the lvalue's address as a pointer to the destination type,
    // then load through it.
    Value *V = EmitLValue(E).getAddress();
    V = Builder.CreateBitCast(V,
                          ConvertType(CGF.getContext().getPointerType(DestTy)));
    return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy),
                            CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    // A plain bitcast cannot change address spaces; go through the integer
    // pointer type instead.
    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      llvm::Type *MidTy = CGF.CGM.getDataLayout().getIntPtrType(SrcTy);
      return Builder.CreateIntToPtr(Builder.CreatePtrToInt(Src, MidTy), DstTy);
    }
    return Builder.CreateBitCast(Src, DstTy);
  }
  case CK_AddressSpaceConversion: {
    Value *Src = Visit(const_cast<Expr*>(E));
    return Builder.CreateAddrSpaceCast(Src, ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_NoOp:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr*>(E));

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    llvm::Value *V = Visit(E);

    llvm::Value *Derived =
      CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived, DestTy->getPointeeType());

    return Derived;
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const CXXRecordDecl *DerivedClassDecl =
      E->getType()->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");

    return CGF.GetAddressOfBaseClass(
        Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
        ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
  }
  case CK_Dynamic: {
    Value *V = Visit(const_cast<Expr*>(E));
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay: {
    assert(E->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");

    Value *V = EmitLValue(E).getAddress();  // Bitfields can't be arrays.

    // Note that VLA pointers are always decayed, so we don't need to do
    // anything here.
    if (!E->getType()->isVariableArrayType()) {
      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
                                 ->getElementType()) &&
             "Expected pointer to array");
      V = Builder.CreateStructGEP(V, 0, "arraydecay");
    }

    // Make sure the array decay ends up being the right type.  This matters if
    // the array type was of an incomplete type.
    return CGF.Builder.CreatePointerCast(V, ConvertType(CE->getType()));
  }
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getAddress();

  case CK_NullToPointer:
    // A nullptr_t operand may still have side effects (see
    // MustVisitNullValue); emit it for effect only.
    if (MustVisitNullValue(E))
      (void) Visit(E);

    return llvm::ConstantPointerNull::get(
                               cast<llvm::PointerType>(ConvertType(DestTy)));

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      (void) Visit(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here.  This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject: {
    llvm::Value *value = Visit(E);
    value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
    return CGF.EmitObjCConsumeObject(E->getType(), value);
  }
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr*>(E));

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.IntPtrTy;
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
  }
  case CK_PointerToIntegral:
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));

  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr*>(E));
    // Convert the scalar to the destination's element type before splatting.
    Elt = EmitScalarConversion(Elt, E->getType(),
                               DestTy->getAs<VectorType>()->getElementType());

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_IntegralCast:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy);
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E));
  case CK_FloatingToBoolean:
    return EmitFloatToBoolConversion(Visit(E));
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
  }

  case CK_ZeroToOCLEvent: {
    assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  }

  llvm_unreachable("unknown scalar cast");
}

Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  // EmitCompoundStmt returns the alloca holding the GNU statement expression's
  // value, or null when the result type is void (or there is no result).
  llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
                                                !E->getType()->isVoidType());
  if (!RetAlloca)
    return nullptr;
  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
                              E->getExprLoc());
}

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

/// EmitAddConsiderOverflowBehavior - Emit InVal + NextVal for an increment or
/// decrement, honoring the -f[no-]wrapv / -ftrapv signed-overflow mode.
llvm::Value *ScalarExprEmitter::
EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
                                llvm::Value *InVal,
                                llvm::Value *NextVal, bool IsInc) {
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    // Overflow is defined to wrap: plain add.
    return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
  case LangOptions::SOB_Undefined:
    // Without the sanitizer, mark the add nsw so the optimizer may assume no
    // signed wrap; with it, fall through to the checked path.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
    // Fall through.
01581 case LangOptions::SOB_Trapping: 01582 BinOpInfo BinOp; 01583 BinOp.LHS = InVal; 01584 BinOp.RHS = NextVal; 01585 BinOp.Ty = E->getType(); 01586 BinOp.Opcode = BO_Add; 01587 BinOp.FPContractable = false; 01588 BinOp.E = E; 01589 return EmitOverflowCheckedBinOp(BinOp); 01590 } 01591 llvm_unreachable("Unknown SignedOverflowBehaviorTy"); 01592 } 01593 01594 llvm::Value * 01595 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 01596 bool isInc, bool isPre) { 01597 01598 QualType type = E->getSubExpr()->getType(); 01599 llvm::PHINode *atomicPHI = nullptr; 01600 llvm::Value *value; 01601 llvm::Value *input; 01602 01603 int amount = (isInc ? 1 : -1); 01604 01605 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 01606 type = atomicTy->getValueType(); 01607 if (isInc && type->isBooleanType()) { 01608 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 01609 if (isPre) { 01610 Builder.Insert(new llvm::StoreInst(True, 01611 LV.getAddress(), LV.isVolatileQualified(), 01612 LV.getAlignment().getQuantity(), 01613 llvm::SequentiallyConsistent)); 01614 return Builder.getTrue(); 01615 } 01616 // For atomic bool increment, we just store true and return it for 01617 // preincrement, do an atomic swap with true for postincrement 01618 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, 01619 LV.getAddress(), True, llvm::SequentiallyConsistent); 01620 } 01621 // Special case for atomic increment / decrement on integers, emit 01622 // atomicrmw instructions. We skip this if we want to be doing overflow 01623 // checking, and fall into the slow path with the atomic cmpxchg loop. 01624 if (!type->isBooleanType() && type->isIntegerType() && 01625 !(type->isUnsignedIntegerType() && 01626 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 01627 CGF.getLangOpts().getSignedOverflowBehavior() != 01628 LangOptions::SOB_Trapping) { 01629 llvm::AtomicRMWInst::BinOp aop = isInc ? 
llvm::AtomicRMWInst::Add : 01630 llvm::AtomicRMWInst::Sub; 01631 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 01632 llvm::Instruction::Sub; 01633 llvm::Value *amt = CGF.EmitToMemory( 01634 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 01635 llvm::Value *old = Builder.CreateAtomicRMW(aop, 01636 LV.getAddress(), amt, llvm::SequentiallyConsistent); 01637 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 01638 } 01639 value = EmitLoadOfLValue(LV, E->getExprLoc()); 01640 input = value; 01641 // For every other atomic operation, we need to emit a load-op-cmpxchg loop 01642 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 01643 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 01644 value = CGF.EmitToMemory(value, type); 01645 Builder.CreateBr(opBB); 01646 Builder.SetInsertPoint(opBB); 01647 atomicPHI = Builder.CreatePHI(value->getType(), 2); 01648 atomicPHI->addIncoming(value, startBB); 01649 value = atomicPHI; 01650 } else { 01651 value = EmitLoadOfLValue(LV, E->getExprLoc()); 01652 input = value; 01653 } 01654 01655 // Special case of integer increment that we have to check first: bool++. 01656 // Due to promotion rules, we get: 01657 // bool++ -> bool = bool + 1 01658 // -> bool = (int)bool + 1 01659 // -> bool = ((int)bool + 1 != 0) 01660 // An interesting aspect of this is that increment is always true. 01661 // Decrement does not have this property. 01662 if (isInc && type->isBooleanType()) { 01663 value = Builder.getTrue(); 01664 01665 // Most common case by far: integer increment. 01666 } else if (type->isIntegerType()) { 01667 01668 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); 01669 01670 // Note that signed integer inc/dec with width less than int can't 01671 // overflow because of promotion rules; we're just eliding a few steps here. 
01672 bool CanOverflow = value->getType()->getIntegerBitWidth() >= 01673 CGF.IntTy->getIntegerBitWidth(); 01674 if (CanOverflow && type->isSignedIntegerOrEnumerationType()) { 01675 value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc); 01676 } else if (CanOverflow && type->isUnsignedIntegerType() && 01677 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) { 01678 BinOpInfo BinOp; 01679 BinOp.LHS = value; 01680 BinOp.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 01681 BinOp.Ty = E->getType(); 01682 BinOp.Opcode = isInc ? BO_Add : BO_Sub; 01683 BinOp.FPContractable = false; 01684 BinOp.E = E; 01685 value = EmitOverflowCheckedBinOp(BinOp); 01686 } else 01687 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 01688 01689 // Next most common: pointer increment. 01690 } else if (const PointerType *ptr = type->getAs<PointerType>()) { 01691 QualType type = ptr->getPointeeType(); 01692 01693 // VLA types don't have constant size. 01694 if (const VariableArrayType *vla 01695 = CGF.getContext().getAsVariableArrayType(type)) { 01696 llvm::Value *numElts = CGF.getVLASize(vla).first; 01697 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 01698 if (CGF.getLangOpts().isSignedOverflowDefined()) 01699 value = Builder.CreateGEP(value, numElts, "vla.inc"); 01700 else 01701 value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc"); 01702 01703 // Arithmetic on function pointers (!) is just +-1. 01704 } else if (type->isFunctionType()) { 01705 llvm::Value *amt = Builder.getInt32(amount); 01706 01707 value = CGF.EmitCastToVoidPtr(value); 01708 if (CGF.getLangOpts().isSignedOverflowDefined()) 01709 value = Builder.CreateGEP(value, amt, "incdec.funcptr"); 01710 else 01711 value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr"); 01712 value = Builder.CreateBitCast(value, input->getType()); 01713 01714 // For everything else, we can just do a simple increment. 
01715 } else { 01716 llvm::Value *amt = Builder.getInt32(amount); 01717 if (CGF.getLangOpts().isSignedOverflowDefined()) 01718 value = Builder.CreateGEP(value, amt, "incdec.ptr"); 01719 else 01720 value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr"); 01721 } 01722 01723 // Vector increment/decrement. 01724 } else if (type->isVectorType()) { 01725 if (type->hasIntegerRepresentation()) { 01726 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 01727 01728 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 01729 } else { 01730 value = Builder.CreateFAdd( 01731 value, 01732 llvm::ConstantFP::get(value->getType(), amount), 01733 isInc ? "inc" : "dec"); 01734 } 01735 01736 // Floating point. 01737 } else if (type->isRealFloatingType()) { 01738 // Add the inc/dec to the real part. 01739 llvm::Value *amt; 01740 01741 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType && 01742 !CGF.getContext().getLangOpts().HalfArgsAndReturns) { 01743 // Another special case: half FP increment should be done via float 01744 value = Builder.CreateCall( 01745 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 01746 CGF.CGM.FloatTy), 01747 input); 01748 } 01749 01750 if (value->getType()->isFloatTy()) 01751 amt = llvm::ConstantFP::get(VMContext, 01752 llvm::APFloat(static_cast<float>(amount))); 01753 else if (value->getType()->isDoubleTy()) 01754 amt = llvm::ConstantFP::get(VMContext, 01755 llvm::APFloat(static_cast<double>(amount))); 01756 else { 01757 llvm::APFloat F(static_cast<float>(amount)); 01758 bool ignored; 01759 F.convert(CGF.getTarget().getLongDoubleFormat(), 01760 llvm::APFloat::rmTowardZero, &ignored); 01761 amt = llvm::ConstantFP::get(VMContext, F); 01762 } 01763 value = Builder.CreateFAdd(value, amt, isInc ? 
"inc" : "dec"); 01764 01765 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType && 01766 !CGF.getContext().getLangOpts().HalfArgsAndReturns) 01767 value = Builder.CreateCall( 01768 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 01769 CGF.CGM.FloatTy), 01770 value); 01771 01772 // Objective-C pointer types. 01773 } else { 01774 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 01775 value = CGF.EmitCastToVoidPtr(value); 01776 01777 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 01778 if (!isInc) size = -size; 01779 llvm::Value *sizeValue = 01780 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 01781 01782 if (CGF.getLangOpts().isSignedOverflowDefined()) 01783 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr"); 01784 else 01785 value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr"); 01786 value = Builder.CreateBitCast(value, input->getType()); 01787 } 01788 01789 if (atomicPHI) { 01790 llvm::BasicBlock *opBB = Builder.GetInsertBlock(); 01791 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 01792 llvm::Value *pair = Builder.CreateAtomicCmpXchg( 01793 LV.getAddress(), atomicPHI, CGF.EmitToMemory(value, type), 01794 llvm::SequentiallyConsistent, llvm::SequentiallyConsistent); 01795 llvm::Value *old = Builder.CreateExtractValue(pair, 0); 01796 llvm::Value *success = Builder.CreateExtractValue(pair, 1); 01797 atomicPHI->addIncoming(old, opBB); 01798 Builder.CreateCondBr(success, contBB, opBB); 01799 Builder.SetInsertPoint(contBB); 01800 return isPre ? value : input; 01801 } 01802 01803 // Store the updated result through the lvalue. 01804 if (LV.isBitField()) 01805 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 01806 else 01807 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 01808 01809 // If this is a postinc, return the value read from memory, otherwise use the 01810 // updated value. 01811 return isPre ? 
value : input; 01812 } 01813 01814 01815 01816 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { 01817 TestAndClearIgnoreResultAssign(); 01818 // Emit unary minus with EmitSub so we handle overflow cases etc. 01819 BinOpInfo BinOp; 01820 BinOp.RHS = Visit(E->getSubExpr()); 01821 01822 if (BinOp.RHS->getType()->isFPOrFPVectorTy()) 01823 BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType()); 01824 else 01825 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 01826 BinOp.Ty = E->getType(); 01827 BinOp.Opcode = BO_Sub; 01828 BinOp.FPContractable = false; 01829 BinOp.E = E; 01830 return EmitSub(BinOp); 01831 } 01832 01833 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 01834 TestAndClearIgnoreResultAssign(); 01835 Value *Op = Visit(E->getSubExpr()); 01836 return Builder.CreateNot(Op, "neg"); 01837 } 01838 01839 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 01840 // Perform vector logical not on comparison with zero vector. 01841 if (E->getType()->isExtVectorType()) { 01842 Value *Oper = Visit(E->getSubExpr()); 01843 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 01844 Value *Result; 01845 if (Oper->getType()->isFPOrFPVectorTy()) 01846 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 01847 else 01848 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 01849 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 01850 } 01851 01852 // Compare operand to zero. 01853 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 01854 01855 // Invert value. 01856 // TODO: Could dynamically modify easy computations here. For example, if 01857 // the operand is an icmp ne, turn into icmp eq. 01858 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 01859 01860 // ZExt result to the expr type. 
01861 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 01862 } 01863 01864 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { 01865 // Try folding the offsetof to a constant. 01866 llvm::APSInt Value; 01867 if (E->EvaluateAsInt(Value, CGF.getContext())) 01868 return Builder.getInt(Value); 01869 01870 // Loop over the components of the offsetof to compute the value. 01871 unsigned n = E->getNumComponents(); 01872 llvm::Type* ResultType = ConvertType(E->getType()); 01873 llvm::Value* Result = llvm::Constant::getNullValue(ResultType); 01874 QualType CurrentType = E->getTypeSourceInfo()->getType(); 01875 for (unsigned i = 0; i != n; ++i) { 01876 OffsetOfExpr::OffsetOfNode ON = E->getComponent(i); 01877 llvm::Value *Offset = nullptr; 01878 switch (ON.getKind()) { 01879 case OffsetOfExpr::OffsetOfNode::Array: { 01880 // Compute the index 01881 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex()); 01882 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr); 01883 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType(); 01884 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv"); 01885 01886 // Save the element type 01887 CurrentType = 01888 CGF.getContext().getAsArrayType(CurrentType)->getElementType(); 01889 01890 // Compute the element size 01891 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, 01892 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity()); 01893 01894 // Multiply out to compute the result 01895 Offset = Builder.CreateMul(Idx, ElemSize); 01896 break; 01897 } 01898 01899 case OffsetOfExpr::OffsetOfNode::Field: { 01900 FieldDecl *MemberDecl = ON.getField(); 01901 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl(); 01902 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 01903 01904 // Compute the index of the field in its parent. 01905 unsigned i = 0; 01906 // FIXME: It would be nice if we didn't have to loop here! 
01907 for (RecordDecl::field_iterator Field = RD->field_begin(), 01908 FieldEnd = RD->field_end(); 01909 Field != FieldEnd; ++Field, ++i) { 01910 if (*Field == MemberDecl) 01911 break; 01912 } 01913 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 01914 01915 // Compute the offset to the field 01916 int64_t OffsetInt = RL.getFieldOffset(i) / 01917 CGF.getContext().getCharWidth(); 01918 Offset = llvm::ConstantInt::get(ResultType, OffsetInt); 01919 01920 // Save the element type. 01921 CurrentType = MemberDecl->getType(); 01922 break; 01923 } 01924 01925 case OffsetOfExpr::OffsetOfNode::Identifier: 01926 llvm_unreachable("dependent __builtin_offsetof"); 01927 01928 case OffsetOfExpr::OffsetOfNode::Base: { 01929 if (ON.getBase()->isVirtual()) { 01930 CGF.ErrorUnsupported(E, "virtual base in offsetof"); 01931 continue; 01932 } 01933 01934 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl(); 01935 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 01936 01937 // Save the element type. 01938 CurrentType = ON.getBase()->getType(); 01939 01940 // Compute the offset to the base. 01941 const RecordType *BaseRT = CurrentType->getAs<RecordType>(); 01942 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); 01943 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD); 01944 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity()); 01945 break; 01946 } 01947 } 01948 Result = Builder.CreateAdd(Result, Offset); 01949 } 01950 return Result; 01951 } 01952 01953 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of 01954 /// argument of the sizeof expression as an integer. 
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->getKind() == UETT_SizeOf) {
    if (const VariableArrayType *VAT =
          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = CGF.getVLASize(VAT);

      llvm::Value *size = numElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);

      return size;
    }
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}

/// Emit __real.  For complex operands project the real component; for a
/// scalar operand __real x is just x.
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  return Visit(Op);
}

/// Emit __imag.  For complex operands project the imaginary component; for a
/// scalar operand the result is zero, but the operand is still evaluated for
/// side effects.
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    // NOTE(review): unlike VisitUnaryReal above, this tests Op->isGLValue()
    // rather than E->isGLValue(), despite the comment -- verify which is
    // intended.
    if (Op->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

/// Populate a BinOpInfo for a binary operator by emitting both operands
/// (LHS first) and recording the computation type and opcode.
BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty  = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPContractable = E->isFPContractable();
  Result.E = E;
  return Result;
}

/// Emit a compound assignment (+=, -=, ...) and return the LHS lvalue;
/// Result receives the computed value (already converted back to the LHS
/// type).  Atomic LHS types use atomicrmw when a matching operation exists,
/// otherwise a load/op/cmpxchg loop.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
                                              const CompoundAssignOperator *E,
                        Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
                                                   Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  // Complex computation types are handled by the complex emitter.
  // NOTE(review): "Compoound" is the spelling of the method as declared in
  // CodeGenFunction.h -- do not "fix" it here alone.
  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompooundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPContractable = false;
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
          LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
      switch (OpInfo.Opcode) {
        // We don't have atomicrmw operands for *, %, /, <<, >>
        case BO_MulAssign: case BO_DivAssign:
        case BO_RemAssign:
        case BO_ShlAssign:
        case BO_ShrAssign:
          break;
        case BO_AddAssign:
          aop = llvm::AtomicRMWInst::Add;
          break;
        case BO_SubAssign:
          aop = llvm::AtomicRMWInst::Sub;
          break;
        case BO_AndAssign:
          aop = llvm::AtomicRMWInst::And;
          break;
        case BO_XorAssign:
          aop = llvm::AtomicRMWInst::Xor;
          break;
        case BO_OrAssign:
          aop = llvm::AtomicRMWInst::Or;
          break;
        default:
          llvm_unreachable("Invalid compound assignment type");
      }
      if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *amt = CGF.EmitToMemory(EmitScalarConversion(OpInfo.RHS,
              E->getRHS()->getType(), LHSTy), LHSTy);
        Builder.CreateAtomicRMW(aop, LHSLV.getAddress(), amt,
            llvm::SequentiallyConsistent);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    // PHI between the initial load and the value seen by a failed cmpxchg.
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                    E->getComputationLHSType());

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);

  if (atomicPHI) {
    // Close the cmpxchg retry loop opened above.
    llvm::BasicBlock *opBB = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    llvm::Value *pair = Builder.CreateAtomicCmpXchg(
        LHSLV.getAddress(), atomicPHI, CGF.EmitToMemory(Result, LHSTy),
        llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
    llvm::Value *old = Builder.CreateExtractValue(pair, 0);
    llvm::Value *success = Builder.CreateExtractValue(pair, 1);
    atomicPHI->addIncoming(old, opBB);
    Builder.CreateCondBr(success, contBB, opBB);
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  return LHSLV;
}

/// Emit a compound assignment and produce its rvalue (or null when the
/// result is ignored).  C can reuse the computed value; C++ reloads when the
/// lvalue is volatile.
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

/// Emit the sanitizer checks shared by / and %: divide-by-zero (RHS != 0)
/// and, for signed types, INT_MIN / -1 overflow.
/// NOTE(review): the isDiv parameter is unused in this body -- both checks
/// are emitted identically for / and %; confirm that is intended.
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerKind>, 2> Checks;

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
      Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);

    // No overflow unless LHS == INT_MIN and RHS == -1 simultaneously.
    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}

/// Emit division: fdiv for FP (with OpenCL single-precision accuracy
/// metadata), udiv/sdiv for integers, with optional sanitizer checks.
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    if (CGF.getLangOpts().OpenCL) {
      // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
      llvm::Type *ValTy = Val->getType();
      if (ValTy->isFloatTy() ||
          (isa<llvm::VectorType>(ValTy) &&
           cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
        CGF.SetFPAccuracy(Val, 2.5);
    }
    return Val;
  }
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

/// Emit remainder (urem/srem).
/// NOTE(review): the shared div/rem check is only reached when the
/// divide-by-zero sanitizer is enabled, unlike EmitDiv which also enters it
/// for SignedIntegerOverflow alone -- confirm intended.
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));

    if (Ops.Ty->isIntegerType())
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

/// Emit +,-,* via the llvm.*.with.overflow intrinsics.  On overflow: call the
/// -foverflow-handler runtime if one is named, else emit a sanitizer check or
/// a trap.  OpID encodes (operation << 1) | is-signed for the handler ABI.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
    &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerKind Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow));
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::Function::iterator insertPt = initialBB;
  llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
                                                      std::next(insertPt));
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  // Merge the intrinsic's result with the handler's (truncated) result.
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}

/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  if (width != CGF.PointerWidthInBits) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
    index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Objective-C object pointer: scale by the object size and GEP over i8*.
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).first;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(pointer, index, "add.ptr");

  return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
}

// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");

  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul) {
    // Negate the first multiplicand: 0 - MulOp0.
    MulOp0 =
      Builder.CreateFSub(
        llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
        "neg");
  } else if (negAdd) {
    // Negate the addend: 0 - Addend.
    Addend =
      Builder.CreateFSub(
        llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
        "neg");
  }

  Value *FMulAdd =
    Builder.CreateCall3(
      CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
      MulOp0, MulOp1, Addend);
  // The fmuladd call replaces the original FMul, which the caller has
  // verified is otherwise unused, so erase it.
  MulOp->eraseFromParent();

  return FMulAdd;
}

// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPContractable)
    return nullptr;

  // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
  // either disabled, or handled entirely by the LLVM backend).
  if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On)
    return nullptr;

  // We have a potentially fusable op. Look for a mul on one of the operands.
  if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) {
      assert(LHSBinOp->getNumUses() == 0 &&
             "Operations with multiple uses shouldn't be contracted.");
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
    }
  } else if (llvm::BinaryOperator* RHSBinOp =
               dyn_cast<llvm::BinaryOperator>(op.RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) {
      assert(RHSBinOp->getNumUses() == 0 &&
             "Operations with multiple uses shouldn't be contracted.");
      // a*b on the RHS: for subtraction this computes op.LHS - a*b, so the
      // multiply (not the addend) is negated.
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
    }
  }

  return nullptr;
}

// Emit scalar '+'/'+='. Pointer operands are routed to pointer arithmetic;
// integer adds honor the signed-overflow behavior and the integer-overflow
// sanitizers; floating-point adds may be contracted into an fmuladd.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, /*subtraction*/ false);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      // Fall through.
    case LangOptions::SOB_Trapping:
      return EmitOverflowCheckedBinOp(op);
    }
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;

    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}

// Emit scalar '-'/'-='. Handles (in order): non-pointer arithmetic (with the
// same overflow/sanitizer/fmuladd handling as EmitAdd), pointer-minus-integer,
// and pointer-minus-pointer (difference in elements).
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        // Fall through.
      case LangOptions::SOB_Trapping:
        return EmitOverflowCheckedBinOp(op);
      }
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, /*subtraction*/ true);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    llvm::Value *numElements;
    std::tie(numElements, elementType) = CGF.getVLASize(vla);

    divisor = numElements;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}

// Return a constant of RHS's type holding (bit width of LHS's (element) type
// minus one), i.e. the largest valid shift amount for LHS.
Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}

// Emit '<<'/'<<='. Optionally emits the shift sanitizer checks (exponent in
// range; for signed types, no meaningful bits shifted out); in OpenCL the
// shift amount is masked to the LHS word size instead.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
      isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
    llvm::Value *Valid = Builder.CreateICmpULE(RHS, WidthMinusOne);

    if (Ops.Ty->hasSignedIntegerRepresentation()) {
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckBitsShifted = CGF.createBasicBlock("check");
      // Only run the second check when the exponent was in range; otherwise
      // the LShr amount below would itself be out of range.
      Builder.CreateCondBr(Valid, CheckBitsShifted, Cont);

      // Check whether we are shifting any non-zero bits off the top of the
      // integer.
      CGF.EmitBlock(CheckBitsShifted);
      llvm::Value *BitsShiftedOff =
        Builder.CreateLShr(Ops.LHS,
                           Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
                                             /*NUW*/true, /*NSW*/true),
                           "shl.check");
      if (CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *SecondCheck = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // Merge the two checks: false exponent check from Orig, bit check from
      // the CheckBitsShifted path.
      llvm::PHINode *P = Builder.CreatePHI(Valid->getType(), 2);
      P->addIncoming(Valid, Orig);
      P->addIncoming(SecondCheck, CheckBitsShifted);
      Valid = P;
    }

    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
  }
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

// Emit '>>'/'>>='. Logical shift for unsigned representations, arithmetic for
// signed; same sanitizer exponent check and OpenCL masking as EmitShl.
Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
      isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Valid =
      Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
  }

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// return corresponding comparison intrinsic for given vector type
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
  case BuiltinType::ULong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
  case BuiltinType::Long:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  }
}

// Emit a comparison operator. The caller supplies the LLVM predicate to use
// for unsigned-integer/pointer (UICmpOpc), signed-integer (SICmpOpc), and
// floating-point (FCmpOpc) operands. Handles member pointers, AltiVec
// predicate comparisons, ordinary scalars/vectors, and complex equality.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
                                      unsigned SICmpOpc, unsigned FCmpOpc) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    // Member pointers only support equality comparisons, delegated to the ABI.
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
      const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
      BuiltinType::Kind ElementKind = BTy->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        // a < b  is emitted as  b > a.
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          // a <= b  is emitted as  b >= a  using vcmpgefp.
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          // Integers: a <= b  is  !(a > b).
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          // Integers: a >= b  is  !(b > a).
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
    }

    if (LHS->getType()->isFPOrFPVectorTy()) {
      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
                                  LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
                                  LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                  LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // Real operand: promote to complex with a zero imaginary part.
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      // Equal iff both the real and imaginary parts are equal.
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
}

// Emit simple assignment. Dispatches on the LHS's ObjC ownership qualifier
// (strong/autoreleasing/weak go through the ARC entry points); otherwise
// evaluates the RHS first, then stores through the (checked) LHS lvalue.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  // No reason to do any of these differently.
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField())
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    else
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

// Emit '&&'. Vector operands are compared element-wise against zero and
// and'ed; scalar operands get constant folding, then short-circuit control
// flow with a PHI merge and PGO region counters.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  RegionCounter Cnt = CGF.getPGORegionCounter(E);

  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    Cnt.beginRegion(Builder);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      Cnt.beginRegion(Builder);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, Cnt.getCount());

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Cnt.beginRegion(Builder);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    SuppressDebugLocation S(Builder);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

// Emit '||'. Mirrors VisitBinLAnd: vector element-wise or, constant folding,
// then short-circuit control flow with a PHI whose pre-existing edges are
// 'true'.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  RegionCounter Cnt = CGF.getPGORegionCounter(E);

  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    Cnt.beginRegion(Builder);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      Cnt.beginRegion(Builder);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           Cnt.getParentCount() - Cnt.getCount());

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Cnt.beginRegion(Builder);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

// Emit ','. The LHS is evaluated purely for its side effects; the result is
// the RHS.
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}


// Emit '?:' (and GNU '?:'). Tries, in order: constant-folding the condition
// away, the OpenCL vector select lowering, a branchless 'select' for cheap
// arms, and finally full control flow with a PHI merge.
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
  RegionCounter Cnt = CGF.getPGORegionCounter(E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        Cnt.beginRegion(Builder);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if (CGF.getLangOpts().OpenCL
      && condExpr->getType()->isVectorType()) {
    Cnt.beginRegion(Builder);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // Per-lane selection is driven by the sign bit of each condition element:
    // tmp is an all-ones/all-zeros mask per lane, tmp2 its complement.
    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(TestMSB,
                                          llvm::VectorType::get(elemType,
                                                                numElem),
                                          "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    // result = (LHS & mask) | (RHS & ~mask), lane-wise.
    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    Cnt.beginRegion(Builder);

    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock, Cnt.getCount());

  CGF.EmitBlock(LHSBlock);
  Cnt.beginRegion(Builder);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

// __builtin_choose_expr: only the chosen arm is ever emitted.
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

// va_arg: use the target-specific lowering if available, otherwise fall back
// to LLVM's generic va_arg instruction.
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();
  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  // If EmitVAArg fails, we fall back to the LLVM instruction.
  if (!ArgPtr)
    return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));

  // FIXME Volatility.
  return Builder.CreateLoad(ArgPtr);
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// OpenCL as_type(): normally a bitcast, except vec3<->vec4 which must use a
// shuffle (and possibly an element-type bitcast first).
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  // Going from vec4->vec3 or vec3->vec4 is a special case and requires
  // a shuffle vector instead of a bitcast.
  llvm::Type *SrcTy = Src->getType();
  if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
    unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
    unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
    if ((numElementsDst == 3 && numElementsSrc == 4)
        || (numElementsDst == 4 && numElementsSrc == 3)) {


      // In the case of going from int4->float3, a bitcast is needed before
      // doing a shuffle.
      llvm::Type *srcElemTy =
        cast<llvm::VectorType>(SrcTy)->getElementType();
      llvm::Type *dstElemTy =
        cast<llvm::VectorType>(DstTy)->getElementType();

      if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
          || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
        // Create a float type of the same size as the source or destination.
        llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
                                                           numElementsSrc);

        Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
      }

      llvm::Value *UnV = llvm::UndefValue::get(Src->getType());

      // Keep the first three lanes; a fourth (undef) lane is added only when
      // widening to vec4.
      SmallVector<llvm::Constant*, 3> Args;
      Args.push_back(Builder.getInt32(0));
      Args.push_back(Builder.getInt32(1));
      Args.push_back(Builder.getInt32(2));

      if (numElementsDst == 4)
        Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));

      llvm::Constant *Mask = llvm::ConstantVector::get(Args);

      return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
    }
  }

  return Builder.CreateBitCast(Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// EmitScalarExpr - Emit the computation of the specified expression of scalar
/// type, ignoring the result.
03376 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { 03377 assert(E && hasScalarEvaluationKind(E->getType()) && 03378 "Invalid scalar expression to emit"); 03379 03380 if (isa<CXXDefaultArgExpr>(E)) 03381 disableDebugInfo(); 03382 Value *V = ScalarExprEmitter(*this, IgnoreResultAssign) 03383 .Visit(const_cast<Expr*>(E)); 03384 if (isa<CXXDefaultArgExpr>(E)) 03385 enableDebugInfo(); 03386 return V; 03387 } 03388 03389 /// EmitScalarConversion - Emit a conversion from the specified type to the 03390 /// specified destination type, both of which are LLVM scalar types. 03391 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, 03392 QualType DstTy) { 03393 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && 03394 "Invalid scalar expression to emit"); 03395 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy); 03396 } 03397 03398 /// EmitComplexToScalarConversion - Emit a conversion from the specified complex 03399 /// type to the specified destination type, where the destination type is an 03400 /// LLVM scalar type. 
03401 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src, 03402 QualType SrcTy, 03403 QualType DstTy) { 03404 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && 03405 "Invalid complex -> scalar conversion"); 03406 return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy, 03407 DstTy); 03408 } 03409 03410 03411 llvm::Value *CodeGenFunction:: 03412 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 03413 bool isInc, bool isPre) { 03414 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre); 03415 } 03416 03417 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { 03418 llvm::Value *V; 03419 // object->isa or (*object).isa 03420 // Generate code as for: *(Class*)object 03421 // build Class* type 03422 llvm::Type *ClassPtrTy = ConvertType(E->getType()); 03423 03424 Expr *BaseExpr = E->getBase(); 03425 if (BaseExpr->isRValue()) { 03426 V = CreateMemTemp(E->getType(), "resval"); 03427 llvm::Value *Src = EmitScalarExpr(BaseExpr); 03428 Builder.CreateStore(Src, V); 03429 V = ScalarExprEmitter(*this).EmitLoadOfLValue( 03430 MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc()); 03431 } else { 03432 if (E->isArrow()) 03433 V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr); 03434 else 03435 V = EmitLValue(BaseExpr).getAddress(); 03436 } 03437 03438 // build Class* type 03439 ClassPtrTy = ClassPtrTy->getPointerTo(); 03440 V = Builder.CreateBitCast(V, ClassPtrTy); 03441 return MakeNaturalAlignAddrLValue(V, E->getType()); 03442 } 03443 03444 03445 LValue CodeGenFunction::EmitCompoundAssignmentLValue( 03446 const CompoundAssignOperator *E) { 03447 ScalarExprEmitter Scalar(*this); 03448 Value *Result = nullptr; 03449 switch (E->getOpcode()) { 03450 #define COMPOUND_OP(Op) \ 03451 case BO_##Op##Assign: \ 03452 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \ 03453 Result) 03454 COMPOUND_OP(Mul); 03455 COMPOUND_OP(Div); 03456 COMPOUND_OP(Rem); 03457 
COMPOUND_OP(Add); 03458 COMPOUND_OP(Sub); 03459 COMPOUND_OP(Shl); 03460 COMPOUND_OP(Shr); 03461 COMPOUND_OP(And); 03462 COMPOUND_OP(Xor); 03463 COMPOUND_OP(Or); 03464 #undef COMPOUND_OP 03465 03466 case BO_PtrMemD: 03467 case BO_PtrMemI: 03468 case BO_Mul: 03469 case BO_Div: 03470 case BO_Rem: 03471 case BO_Add: 03472 case BO_Sub: 03473 case BO_Shl: 03474 case BO_Shr: 03475 case BO_LT: 03476 case BO_GT: 03477 case BO_LE: 03478 case BO_GE: 03479 case BO_EQ: 03480 case BO_NE: 03481 case BO_And: 03482 case BO_Xor: 03483 case BO_Or: 03484 case BO_LAnd: 03485 case BO_LOr: 03486 case BO_Assign: 03487 case BO_Comma: 03488 llvm_unreachable("Not valid compound assignment operators"); 03489 } 03490 03491 llvm_unreachable("Unhandled compound assignment operator"); 03492 }