clang API Documentation
//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);

  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node and add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF =
          Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type compared to the parent's method. For C++ objects, this means
/// we need to add base casts.
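///
/// For example (an illustrative sketch, not code from this file):
///
///   struct Base { virtual Base *clone(); };
///   struct Derived : Base { Derived *clone() override; }; // covariant
///
/// If a call through a Base* is devirtualized to Derived::clone(), the callee
/// returns a Derived* while the caller's expression has type Base*, so the
/// returned value must be cast back to the base class.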
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
                                            const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// Returns true if the CXXConstructExpr \p E was intended to construct a
/// prvalue for the region in \p V.
///
/// Note that we can't just test for rvalue vs. glvalue because
/// CXXConstructExprs embedded in DeclStmts and initializers are considered
/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
  if (E->isGLValue())
    return false;

  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return false;

  return isa<CXXTempObjectRegion>(MR);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
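///
/// In terms of the node names used in the implementation below, the sequence
/// is roughly: CEBNode -> BindedRetNode -> CleanedNodes -> CEENode -> Dst.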
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a temporary prvalue, get its bindings.
      if (isTemporaryPRValue(CCE, ThisV))
        ThisV = state->getSVal(ThisV.castAs<Loc>());

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
                                                      &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->isStdNamespace();
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is a map from the receiver region to a dispatch mode, specifying
// whether we consider this region's type information precise along the
// given path.
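//
// For example (an illustrative Objective-C sketch, not code from this file):
//
//   id obj = getObject();  // dynamic type of 'obj' imprecisely known
//   [obj frob];            // candidate for dynamic dispatch bifurcation
//
// In IPAK_DynamicDispatchBifurcate mode the path is split at the message send:
// one path inlines the definition found by getRuntimeDefinition(), the other
// evaluates the call conservatively. The receiver's region is recorded in this
// map so that the same region is only split once.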
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
}

REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}
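
// Note: ReplayWithoutInlining is set when the engine backtracks to re-evaluate
// a call that was previously inlined (for example, when exploration of the
// inlined callee had to be aborted). In that case getInlineFailedState()
// returns a state with the marker stripped, and defaultEvalCall() below uses
// that state to evaluate the call conservatively instead of inlining it again.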

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
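//
// For example (illustrative): after conservatively evaluating 'foo(&x)', the
// bindings for 'x' (and anything reachable from it) are invalidated, since the
// unknown callee may have written to them, and the call's result is bound to a
// fresh conjured symbol.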
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

enum CallInlinePolicy {
  CIP_Allowed,
  CIP_DisallowedOnce,
  CIP_DisallowedAlways
};

static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
                                          const ExplodedNode *Pred,
                                          AnalyzerOptions &Opts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXAllocator:
    if (Opts.mayInlineCXXAllocator())
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
                        DeclName.getAsOpaquePtr(),
                        Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a member named 'begin' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
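///
/// Dynamic, per-call-site properties (such as the call kind, the current stack
/// depth, and how often the function has already been inlined) are checked
/// separately in ExprEngine::shouldInlineCall() below.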
static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
                          AnalyzerOptions &Opts) {
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.mayInlineCXXContainerMethods())
        if (!Ctx.getSourceManager().isInMainFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++
      // shared_ptr. We don't currently do a good job modeling shared_ptr
      // because we can't see the reference count, so treating it as opaque
      // is probably the best idea.
      if (!Opts.mayInlineCXXSharedPtrDtor())
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // Temporary object destructor processing is currently broken, so we never
  // inline them.
  // FIXME: Remove this once temp destructors are working.
  if (isa<CXXDestructorCall>(Call)) {
    if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
      return false;
  }

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;
  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC, Opts)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or if we've reached the maximum stack depth.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() > 13) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
      || IsRecursive))
    return false;

  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}
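
// For example (illustrative): the implicitly-defined 'X &operator=(const X &)'
// of a class whose members are all trivially copyable is trivial; such a call
// is modeled as a byte-wise copy by performTrivialCopy() below rather than
// being inlined.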

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
          State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}