LLVM API Documentation

MemoryDependenceAnalysis.cpp
//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static const int BlockScanLimit = 100;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : FunctionPass(ID), PredCache() {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}



/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionTracker>();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  AT = &getAnalysis<AssumptionTracker>();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  if (!PredCache)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    }
    if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    }
    if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                      ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                      ->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}
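
// Illustrative example (not from the original source): for a simple store
// such as "store i32 0, i32* %P", GetLocation fills Loc with %P's location
// and returns Mod; a simple load "%v = load i32* %P" yields Ref; a call with
// no identifiable location leaves Loc.Ptr null and falls back on the coarse
// mayWriteToMemory()/mayReadFromMemory() answer at the end of the function.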

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
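
// Illustrative example (not from the original source): when CS is a
// read-only call such as "%a = call i32 @f()" (@f being a hypothetical
// readonly function), an earlier identical call "%b = call i32 @f()" in the
// same block is returned as a Def, letting the client CSE the two calls; any
// intervening instruction that may interfere comes back as a Clobber instead.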

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase and MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const DataLayout *DL) {
  // If we have no target data, we can't do this.
  if (!DL) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (!MemLocBase)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *DL);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location (specified by MemLocBase, MemLocOffs, and
/// MemLocSize) and compares it against a load (LI).  If LI could be safely
/// widened to a larger integer load that is 1) still efficient, 2) safe for
/// the target, and 3) would cover the specified memory location, then this
/// function returns the size in bytes of the load width to use.  If not,
/// this returns zero.
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const DataLayout &DL) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread))
    return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    if (LIOffs+NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->getAttributes().
          hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}
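
// Worked example (illustrative, not from the original source): for an i8
// load of %P with "align 4" (LIOffs == 0, LoadAlign == 4) and a one-byte
// query at offset 3 (MemLocEnd == 4), the loop tries a 2-byte and then a
// 4-byte load; since 0 + 4 >= 4, the load can be widened to i32 and 4 is
// returned.  A one-byte query at offset 4 (MemLocEnd == 5) instead fails
// the "LIOffs+LoadAlign < MemLocEnd" check above and yields 0.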

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.  If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB,
                         Instruction *QueryInst) {

  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;
  // We must be careful with atomic accesses, as they may allow another thread
  //   to touch this location, clobbering it. We are conservative: if the
  //   QueryInst is not a simple (non-atomic) memory access, we automatically
  //   return getClobber.
  // If it is simple, we know, based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  //   memory model" in PLDI 2013, that a non-atomic location can only be
  //   clobbered between a pair of a release and an acquire action, with no
  //   access to the location in between.
  // Here is an example giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimisation of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.
  bool HasSeenAcquire = false;

  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II)) continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }
    // Values depend on loads if the pointers are must-aliased.  This means
    // that a load depends on another must-aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load
    // that it does not alias when that atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not atomic.
      // An Acquire (or higher) load sets the HasSeenAcquire flag, so that any
      //   release store will know to return getClobber.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(LI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(LI);
        }

        if (isAtLeastAcquire(LI->getOrdering()))
          HasSeenAcquire = true;
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses can, for example, be
      // reordered with volatile accesses.
      if (LI->isVolatile())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, DL))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // A Release (or higher) store further requires that no acquire load
      //   has been seen.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered()) {
        if (!QueryInst)
          return MemDepResult::getClobber(SI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(SI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(SI);
        }

        if (HasSeenAcquire && isAtLeastRelease(SI->getOrdering()))
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses can, for example, be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
       continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      // Be conservative if the accessed pointer may alias the allocation.
      if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
        return MemDepResult::getClobber(Inst);
      // If the allocation is not aliased and does not read memory (like
      // strdup), it is safe to ignore.
      if (isa<AllocaInst>(Inst) ||
          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
        continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == AliasAnalysis::ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT);
    switch (MR) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
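
// Illustrative example (not from the original source): scanning backwards
// from "%v = load i32* %P" over "store i32 1, i32* %P" returns the store as
// a Def (the locations must-alias), which lets a client such as GVN forward
// the stored value to the load; a store through a may-aliased pointer comes
// back as a Clobber instead.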

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
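
// Illustrative client usage (hypothetical code, not part of this file):
//
//   MemDepResult Res = MD.getDependency(LI);
//   if (Res.isDef() || Res.isClobber())
//     ... // local dependency: Res.getInst() defines/clobbers the location
//   else if (Res.isNonLocal())
//     ... // fall back to the non-local queries defined below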

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
 "getNonLocalCallDependency should only be used on calls with non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
       I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it is
      // unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}
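
// Illustrative client usage (hypothetical code, not part of this file):
//
//   const MemoryDependenceAnalysis::NonLocalDepInfo &Deps =
//       MD.getNonLocalCallDependency(CallSite(CI));
//   for (const NonLocalDepEntry &E : Deps)
//     ... // E.getBB() and E.getResult() describe each block's dependency
//
// As documented above, the reference points into MemDep's internal cache and
// must be copied if it is needed past the next non-local query.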

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AT);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
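
// Illustrative client usage (hypothetical code, not part of this file):
//
//   SmallVector<NonLocalDepResult, 16> Deps;
//   MD.getNonLocalPointerDependency(AA.getLocation(LI), /*isLoad=*/true,
//                                   LI->getParent(), Deps);
//   for (const NonLocalDepResult &Dep : Deps)
//     ... // Dep.getBB(), Dep.getResult() and Dep.getAddress() per block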

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This is
/// optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}
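
// Design note (editorial): appending one or two entries and binary-searching
// them into place costs O(log n) comparisons plus a single vector insertion,
// which is cheaper than re-sorting the whole cache; only larger batches fall
// back to the full std::sort above.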

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
                            const AliasAnalysis::Location &Loc,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepResult> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(Pointer,
                                         Loc.getWithNewSize(CacheInfo->Size),
                                         isLoad, StartBB, Result, Visited,
                                         SkipFirstBlock);
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
             DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
          if (Instruction *Inst = DI->getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutAATags(),
                                           isLoad, StartBB, Result, Visited,
                                           SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->getBB(), Addr));
      if (I->getResult().isNonLocal()) {
        continue;
      }

      if (!DT) {
        Result.push_back(NonLocalDepResult(I->getBB(),
                                           MemDepResult::getUnknown(),
                                           Addr));
      } else if (DT->isReachableFromEntry(I->getBB())) {
        Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info covers.  If the cache is empty, the result
  // will be valid cache info; otherwise it won't be.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                 NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (!DT) {
          Result.push_back(NonLocalDepResult(BB,
                                             MemDepResult::getUnknown(),
                                             Pointer.getAddr()));
          continue;
        } else if (DT->isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock*, 16> NewBlocks;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we
    // can't phi translate this value, don't even try.
01196     if (!Pointer.IsPotentiallyPHITranslatable())
01197       goto PredTranslationFailure;
01198 
01199     // We may have added values to the cache list before this PHI translation.
01200     // If so, we haven't done anything to ensure that the cache remains sorted.
01201     // Sort it now (if needed) so that recursive invocations of
01202     // getNonLocalPointerDepFromBB and other routines that could reuse the cache
01203     // value will only see properly sorted cache arrays.
01204     if (Cache && NumSortedEntries != Cache->size()) {
01205       SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
01206       NumSortedEntries = Cache->size();
01207     }
01208     Cache = nullptr;
01209 
01210     PredList.clear();
01211     for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
01212       BasicBlock *Pred = *PI;
01213       PredList.push_back(std::make_pair(Pred, Pointer));
01214 
01215       // Get the PHI translated pointer in this predecessor.  This can fail if
01216       // the pointer is not translatable, in which case getAddr() returns null.
01217       PHITransAddr &PredPointer = PredList.back().second;
01218       PredPointer.PHITranslateValue(BB, Pred, nullptr);
01219 
01220       Value *PredPtrVal = PredPointer.getAddr();
01221 
01222       // Check to see if we have already visited this pred block with another
01223       // pointer.  If so, we can't do this lookup.  This failure can occur
01224       // with PHI translation when a critical edge exists and the PHI node in
01225       // the successor translates to a pointer value different than the
01226       // pointer the block was first analyzed with.
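            // Hypothetical sketch: if Pred was first reached along a path where
            // the query pointer was %a, and this path phi translates the same
            // query to %b, the single Visited slot for Pred cannot record both
            // addresses, so the lookup is conservatively abandoned below.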
01227       std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
01228         InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
01229 
01230       if (!InsertRes.second) {
01231         // The pred is already in Visited; take it off the list of preds to visit.
01232         PredList.pop_back();
01233 
01234         // If the predecessor was visited with PredPtrVal, then we already
01235         // did the analysis and can ignore it.
01236         if (InsertRes.first->second == PredPtrVal)
01237           continue;
01238 
01239         // Otherwise, the block was previously analyzed with a different
01240         // pointer.  We can't represent the result of this case, so we just
01241         // treat this as a phi translation failure.
01242 
01243         // Make sure to clean up the Visited map before continuing on to
01244         // PredTranslationFailure.
01245         for (unsigned i = 0, n = PredList.size(); i < n; ++i)
01246           Visited.erase(PredList[i].first);
01247 
01248         goto PredTranslationFailure;
01249       }
01250     }
01251 
01252     // Actually process results here; this needs to be a separate loop to avoid
01253     // calling getNonLocalPointerDepFromBB for blocks we don't want to return
01254     // any results for.  (getNonLocalPointerDepFromBB will modify our
01255     // data structures in ways the code after the PredTranslationFailure label
01256     // doesn't expect.)
01257     for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
01258       BasicBlock *Pred = PredList[i].first;
01259       PHITransAddr &PredPointer = PredList[i].second;
01260       Value *PredPtrVal = PredPointer.getAddr();
01261 
01262       bool CanTranslate = true;
01263       // If PHI translation was unable to find an available pointer in this
01264       // predecessor, then we have to assume that the pointer is clobbered in
01265       // that predecessor.  We can still do PRE of the load, which would insert
01266       // a computation of the pointer in this predecessor.
01267       if (!PredPtrVal)
01268         CanTranslate = false;
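            // For instance (hypothetical sketch): if the pointer is
            //   %addr = getelementptr i32* %base, i64 %i
            // and %i is not available in Pred, translation fails here, yet PRE
            // can still materialize %addr in Pred and hoist the load; the
            // failure is therefore recorded below as an unknown result rather
            // than aborting the whole query.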
01269 
01270       // FIXME: it is entirely possible that PHI translating will end up with
01271       // the same value.  Consider PHI translating something like:
01272       // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
01273       // to recurse here, pedantically speaking.
01274 
01275       // If getNonLocalPointerDepFromBB fails here, that means the cached
01276       // result conflicted with the Visited list; we have to conservatively
01277       // assume it is unknown, but this also does not block PRE of the load.
01278       if (!CanTranslate ||
01279           getNonLocalPointerDepFromBB(PredPointer,
01280                                       Loc.getWithNewPtr(PredPtrVal),
01281                                       isLoad, Pred,
01282                                       Result, Visited)) {
01283         // Add the entry to the Result list.
01284         NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
01285         Result.push_back(Entry);
01286 
01287         // Since we had a phi translation failure, the cache for CacheKey won't
01288         // include all of the entries that we need to immediately satisfy future
01289         // queries.  Mark this in NonLocalPointerDeps by setting the
01290         // BBSkipFirstBlockPair pointer to null.  Reusing the cached value then
01291         // requires more work, but it cannot miss the phi translation failure.
01292         NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
01293         NLPI.Pair = BBSkipFirstBlockPair();
01294         continue;
01295       }
01296     }
01297 
01298     // Refresh the CacheInfo/Cache pointer, which may have been invalidated.
01299     CacheInfo = &NonLocalPointerDeps[CacheKey];
01300     Cache = &CacheInfo->NonLocalDeps;
01301     NumSortedEntries = Cache->size();
01302 
01303     // Since we did phi translation, the "Cache" set won't contain all of the
01304     // results for the query.  This is ok (we can still use it to accelerate
01305     // specific block queries) but we can't do the fastpath "return all
01306     // results from the set".  Clear out the indicator for this.
01307     CacheInfo->Pair = BBSkipFirstBlockPair();
01308     SkipFirstBlock = false;
01309     continue;
01310 
01311   PredTranslationFailure:
01312     // The following code is the "failure" path; we can't produce a sane
01313     // translation for the given block.  It assumes that we haven't modified
01314     // any of our data structures while processing the current block.
01315 
01316     if (!Cache) {
01317       // Refresh the CacheInfo/Cache pointer if it got invalidated.
01318       CacheInfo = &NonLocalPointerDeps[CacheKey];
01319       Cache = &CacheInfo->NonLocalDeps;
01320       NumSortedEntries = Cache->size();
01321     }
01322 
01323     // Since we failed phi translation, the "Cache" set won't contain all of the
01324     // results for the query.  This is ok (we can still use it to accelerate
01325     // specific block queries) but we can't do the fastpath "return all
01326     // results from the set".  Clear out the indicator for this.
01327     CacheInfo->Pair = BBSkipFirstBlockPair();
01328 
01329     // If *nothing* works, mark the pointer as unknown.
01330     //
01331     // If this is the magic first block, return this as a clobber of the whole
01332     // incoming value.  Since we can't phi translate to one of the predecessors,
01333     // we have to bail out.
01334     if (SkipFirstBlock)
01335       return true;
01336 
01337     for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
01338       assert(I != Cache->rend() && "Didn't find current block??");
01339       if (I->getBB() != BB)
01340         continue;
01341 
01342       assert(I->getResult().isNonLocal() &&
01343              "Should only be here with transparent block");
01344       I->setResult(MemDepResult::getUnknown());
01345       Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
01346                                          Pointer.getAddr()));
01347       break;
01348     }
01349   }
01350 
01351   // Okay, we're done now.  If we added new values to the cache, re-sort it.
01352   SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
01353   DEBUG(AssertSorted(*Cache));
01354   return false;
01355 }
01356 
01357 /// RemoveCachedNonLocalPointerDependencies - If P exists in
01358 /// CachedNonLocalPointerInfo, remove it.
01359 void MemoryDependenceAnalysis::
01360 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
01361   CachedNonLocalPointerInfo::iterator It =
01362     NonLocalPointerDeps.find(P);
01363   if (It == NonLocalPointerDeps.end()) return;
01364 
01365   // Remove all of the entries in the BB->val map.  This involves removing
01366   // instructions from the reverse map.
01367   NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
01368 
01369   for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
01370     Instruction *Target = PInfo[i].getResult().getInst();
01371     if (!Target) continue;  // Ignore non-local dep results.
01372     assert(Target->getParent() == PInfo[i].getBB());
01373 
01374     // We are removing this entry from 'Cache', so update the reverse info.
01375     RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
01376   }
01377 
01378   // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
01379   NonLocalPointerDeps.erase(It);
01380 }
01381 
01382 
01383 /// invalidateCachedPointerInfo - This method is used to invalidate cached
01384 /// information about the specified pointer, because it may be too
01385 /// conservative in memdep.  This is an optional call that can be used when
01386 /// the client detects an equivalence between the pointer and some other
01387 /// value and replaces the other value with ptr.  This can make Ptr available
01388 /// in more places than the cached info reflects.
01389 void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
01390   // If Ptr isn't really a pointer, just ignore it.
01391   if (!Ptr->getType()->isPointerTy()) return;
01392   // Flush store info for the pointer.
01393   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
01394   // Flush load info for the pointer.
01395   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
01396 }
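
      // A minimal client sketch (hypothetical, not part of this file): a pass
      // that proves two pointers equal and rewrites one into the other should
      // invalidate the surviving pointer's cached info, e.g.:
      //
      //   void replaceAndInvalidate(Value *Old, Value *New,
      //                             MemoryDependenceAnalysis &MD) {
      //     Old->replaceAllUsesWith(New);  // New may now be available in more places.
      //     MD.invalidateCachedPointerInfo(New);
      //   }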
01397 
01398 /// invalidateCachedPredecessors - Clear the PredIteratorCache info.
01399 /// This needs to be done when the CFG changes, e.g., due to splitting
01400 /// critical edges.
01401 void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
01402   PredCache->clear();
01403 }
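
      // A minimal client sketch (hypothetical; TI, SuccNum, and P are
      // placeholders): a transform that splits a critical edge while memdep is
      // alive should drop the stale predecessor cache, e.g.:
      //
      //   if (SplitCriticalEdge(TI, SuccNum, P))
      //     MD.invalidateCachedPredecessors();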
01404 
01405 /// removeInstruction - Remove an instruction from the dependence analysis,
01406 /// updating the dependence of instructions that previously depended on it.
01407 /// This method attempts to keep the cache coherent using the reverse map.
01408 void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
01409   // Walk through the Non-local dependencies, removing this one as the value
01410   // for any cached queries.
01411   NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
01412   if (NLDI != NonLocalDeps.end()) {
01413     NonLocalDepInfo &BlockMap = NLDI->second.first;
01414     for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
01415          DI != DE; ++DI)
01416       if (Instruction *Inst = DI->getResult().getInst())
01417         RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
01418     NonLocalDeps.erase(NLDI);
01419   }
01420 
01421   // If we have a cached local dependence query for this instruction, remove it.
01422   //
01423   LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
01424   if (LocalDepEntry != LocalDeps.end()) {
01425     // Remove us from DepInst's reverse set now that the local dep info is gone.
01426     if (Instruction *Inst = LocalDepEntry->second.getInst())
01427       RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
01428 
01429     // Remove this local dependency info.
01430     LocalDeps.erase(LocalDepEntry);
01431   }
01432 
01433   // If we have any cached pointer dependencies on this instruction, remove
01434   // them.  If the instruction has non-pointer type, then it can't be a pointer
01435   // base.
01436 
01437   // Remove it from both the load info and the store info.  The instruction
01438   // can't be in either of these maps if it is non-pointer.
01439   if (RemInst->getType()->isPointerTy()) {
01440     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
01441     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
01442   }
01443 
01444   // Loop over all of the things that depend on the instruction we're removing.
01445   //
01446   SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
01447 
01448   // If we find RemInst as a clobber or Def in any of the maps for other values,
01449   // we need to replace its entry with a dirty version of the instruction after
01450   // it.  If RemInst is a terminator, we use a null dirty value.
01451   //
01452   // Using a dirty version of the instruction after RemInst saves having to scan
01453   // the entire block to get to this point.
01454   MemDepResult NewDirtyVal;
01455   if (!RemInst->isTerminator())
01456     NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
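        // Illustrative sketch (hypothetical IR): if RemInst is the store in
        //   store i32 0, i32* %p
        //   %v = load i32* %p
        // then any cached dependency on the store becomes "dirty at %v", and a
        // later query re-scans from %v instead of from the end of the block.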
01457 
01458   ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
01459   if (ReverseDepIt != ReverseLocalDeps.end()) {
01460     // RemInst can't be the terminator if it has local stuff depending on it.
01461     assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
01462            "Nothing can locally depend on a terminator");
01463 
01464     for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
01465       assert(InstDependingOnRemInst != RemInst &&
01466              "Already removed our local dep info");
01467 
01468       LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
01469 
01470       // Make sure to remember that new things depend on NewDirtyVal's instruction.
01471       assert(NewDirtyVal.getInst() && "There is no way something else can have "
01472              "a local dep on this if it is a terminator!");
01473       ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
01474                                                 InstDependingOnRemInst));
01475     }
01476 
01477     ReverseLocalDeps.erase(ReverseDepIt);
01478 
01479     // Add new reverse deps after scanning the set, to avoid invalidating the
01480     // set we just iterated over.
01481     while (!ReverseDepsToAdd.empty()) {
01482       ReverseLocalDeps[ReverseDepsToAdd.back().first]
01483         .insert(ReverseDepsToAdd.back().second);
01484       ReverseDepsToAdd.pop_back();
01485     }
01486   }
01487 
01488   ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
01489   if (ReverseDepIt != ReverseNonLocalDeps.end()) {
01490     for (Instruction *I : ReverseDepIt->second) {
01491       assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
01492 
01493       PerInstNLInfo &INLD = NonLocalDeps[I];
01494       // The information is now dirty!
01495       INLD.second = true;
01496 
01497       for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
01498            DE = INLD.first.end(); DI != DE; ++DI) {
01499         if (DI->getResult().getInst() != RemInst) continue;
01500 
01501         // Convert to a dirty entry for the subsequent instruction.
01502         DI->setResult(NewDirtyVal);
01503 
01504         if (Instruction *NextI = NewDirtyVal.getInst())
01505           ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
01506       }
01507     }
01508 
01509     ReverseNonLocalDeps.erase(ReverseDepIt);
01510 
01511     // Add new reverse deps after scanning the set, to avoid invalidating it.
01512     while (!ReverseDepsToAdd.empty()) {
01513       ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
01514         .insert(ReverseDepsToAdd.back().second);
01515       ReverseDepsToAdd.pop_back();
01516     }
01517   }
01518 
01519   // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
01520   // value in the NonLocalPointerDeps info.
01521   ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
01522     ReverseNonLocalPtrDeps.find(RemInst);
01523   if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
01524     SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
01525 
01526     for (ValueIsLoadPair P : ReversePtrDepIt->second) {
01527       assert(P.getPointer() != RemInst &&
01528              "Already removed NonLocalPointerDeps info for RemInst");
01529 
01530       NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
01531 
01532       // The cache is not valid for any specific block anymore.
01533       NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
01534 
01535       // Update any entries for RemInst to use the instruction after it.
01536       for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
01537            DI != DE; ++DI) {
01538         if (DI->getResult().getInst() != RemInst) continue;
01539 
01540         // Convert to a dirty entry for the subsequent instruction.
01541         DI->setResult(NewDirtyVal);
01542 
01543         if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
01544           ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
01545       }
01546 
01547       // Re-sort the NonLocalDepInfo.  Changing a dirty entry to refer to
01548       // its subsequent instruction may have broken the sort order.
01549       std::sort(NLPDI.begin(), NLPDI.end());
01550     }
01551 
01552     ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
01553 
01554     while (!ReversePtrDepsToAdd.empty()) {
01555       ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
01556         .insert(ReversePtrDepsToAdd.back().second);
01557       ReversePtrDepsToAdd.pop_back();
01558     }
01559   }
01560 
01561 
01562   assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
01563   AA->deleteValue(RemInst);
01564   DEBUG(verifyRemoved(RemInst));
01565 }
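
      // A minimal client sketch (hypothetical; DeadInst is a placeholder):
      // passes that delete instructions while keeping this analysis alive must
      // notify it before erasing, e.g.:
      //
      //   MD.removeInstruction(DeadInst);
      //   DeadInst->eraseFromParent();
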
01566 /// verifyRemoved - Verify that the specified instruction does not occur
01567 /// in our internal data structures.  It does so by asserting in debug
01568 /// builds; in release (NDEBUG) builds it is a no-op.
01569 void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
01570 #ifndef NDEBUG
01571   for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
01572        E = LocalDeps.end(); I != E; ++I) {
01573     assert(I->first != D && "Inst occurs in data structures");
01574     assert(I->second.getInst() != D &&
01575            "Inst occurs in data structures");
01576   }
01577 
01578   for (CachedNonLocalPointerInfo::const_iterator I = NonLocalPointerDeps.begin(),
01579        E = NonLocalPointerDeps.end(); I != E; ++I) {
01580     assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
01581     const NonLocalDepInfo &Val = I->second.NonLocalDeps;
01582     for (NonLocalDepInfo::const_iterator II = Val.begin(), EE = Val.end();
01583          II != EE; ++II)
01584       assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
01585   }
01586 
01587   for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
01588        E = NonLocalDeps.end(); I != E; ++I) {
01589     assert(I->first != D && "Inst occurs in data structures");
01590     const PerInstNLInfo &INLD = I->second;
01591     for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
01592          EE = INLD.first.end(); II != EE; ++II)
01593       assert(II->getResult().getInst() != D && "Inst occurs in data structures");
01594   }
01595 
01596   for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
01597        E = ReverseLocalDeps.end(); I != E; ++I) {
01598     assert(I->first != D && "Inst occurs in data structures");
01599     for (Instruction *Inst : I->second)
01600       assert(Inst != D && "Inst occurs in data structures");
01601   }
01602 
01603   for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
01604        E = ReverseNonLocalDeps.end();
01605        I != E; ++I) {
01606     assert(I->first != D && "Inst occurs in data structures");
01607     for (Instruction *Inst : I->second)
01608       assert(Inst != D && "Inst occurs in data structures");
01609   }
01610 
01611   for (ReverseNonLocalPtrDepTy::const_iterator
01612        I = ReverseNonLocalPtrDeps.begin(),
01613        E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
01614     assert(I->first != D && "Inst occurs in rev NLPD map");
01615 
01616     for (ValueIsLoadPair P : I->second)
01617       assert(P != ValueIsLoadPair(D, false) &&
01618              P != ValueIsLoadPair(D, true) &&
01619              "Inst occurs in ReverseNonLocalPtrDeps map");
01620   }
01621 #endif
01622 }