
ObjCARCOpts.cpp
00001 //===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 /// \file
00010 /// This file defines ObjC ARC optimizations. ARC stands for Automatic
00011 /// Reference Counting and is a system for managing reference counts for objects
00012 /// in Objective C.
00013 ///
00014 /// The optimizations performed include elimination of redundant, partially
00015 /// redundant, and inconsequential reference count operations, elimination of
00016 /// redundant weak pointer operations, and numerous minor simplifications.
00017 ///
00018 /// WARNING: This file knows about certain library functions. It recognizes them
00019 /// by name, and hardwires knowledge of their semantics.
00020 ///
00021 /// WARNING: This file knows about how certain Objective-C library functions are
00022 /// used. Naive LLVM IR transformations which would otherwise be
00023 /// behavior-preserving may break these assumptions.
00024 ///
00025 //===----------------------------------------------------------------------===//
00026 
00027 #include "ObjCARC.h"
00028 #include "ARCRuntimeEntryPoints.h"
00029 #include "DependencyAnalysis.h"
00030 #include "ObjCARCAliasAnalysis.h"
00031 #include "ProvenanceAnalysis.h"
00032 #include "llvm/ADT/DenseMap.h"
00033 #include "llvm/ADT/DenseSet.h"
00034 #include "llvm/ADT/STLExtras.h"
00035 #include "llvm/ADT/SmallPtrSet.h"
00036 #include "llvm/ADT/Statistic.h"
00037 #include "llvm/IR/CFG.h"
00038 #include "llvm/IR/IRBuilder.h"
00039 #include "llvm/IR/LLVMContext.h"
00040 #include "llvm/Support/Debug.h"
00041 #include "llvm/Support/raw_ostream.h"
00042 
00043 using namespace llvm;
00044 using namespace llvm::objcarc;
00045 
00046 #define DEBUG_TYPE "objc-arc-opts"
00047 
00048 /// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
00049 /// @{
00050 
00051 namespace {
00052   /// \brief An associative container with fast insertion-order (deterministic)
00053   /// iteration over its elements. Plus the special blot operation.
00054   template<class KeyT, class ValueT>
00055   class MapVector {
00056     /// Map keys to indices in Vector.
00057     typedef DenseMap<KeyT, size_t> MapTy;
00058     MapTy Map;
00059 
00060     typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
00061     /// Keys and values.
00062     VectorTy Vector;
00063 
00064   public:
00065     typedef typename VectorTy::iterator iterator;
00066     typedef typename VectorTy::const_iterator const_iterator;
00067     iterator begin() { return Vector.begin(); }
00068     iterator end() { return Vector.end(); }
00069     const_iterator begin() const { return Vector.begin(); }
00070     const_iterator end() const { return Vector.end(); }
00071 
00072 #ifdef XDEBUG
00073     ~MapVector() {
00074       assert(Vector.size() >= Map.size()); // May differ due to blotting.
00075       for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
00076            I != E; ++I) {
00077         assert(I->second < Vector.size());
00078         assert(Vector[I->second].first == I->first);
00079       }
00080       for (typename VectorTy::const_iterator I = Vector.begin(),
00081            E = Vector.end(); I != E; ++I)
00082         assert(!I->first ||
00083                (Map.count(I->first) &&
00084                 Map[I->first] == size_t(I - Vector.begin())));
00085     }
00086 #endif
00087 
00088     ValueT &operator[](const KeyT &Arg) {
00089       std::pair<typename MapTy::iterator, bool> Pair =
00090         Map.insert(std::make_pair(Arg, size_t(0)));
00091       if (Pair.second) {
00092         size_t Num = Vector.size();
00093         Pair.first->second = Num;
00094         Vector.push_back(std::make_pair(Arg, ValueT()));
00095         return Vector[Num].second;
00096       }
00097       return Vector[Pair.first->second].second;
00098     }
00099 
00100     std::pair<iterator, bool>
00101     insert(const std::pair<KeyT, ValueT> &InsertPair) {
00102       std::pair<typename MapTy::iterator, bool> Pair =
00103         Map.insert(std::make_pair(InsertPair.first, size_t(0)));
00104       if (Pair.second) {
00105         size_t Num = Vector.size();
00106         Pair.first->second = Num;
00107         Vector.push_back(InsertPair);
00108         return std::make_pair(Vector.begin() + Num, true);
00109       }
00110       return std::make_pair(Vector.begin() + Pair.first->second, false);
00111     }
00112 
00113     iterator find(const KeyT &Key) {
00114       typename MapTy::iterator It = Map.find(Key);
00115       if (It == Map.end()) return Vector.end();
00116       return Vector.begin() + It->second;
00117     }
00118 
00119     const_iterator find(const KeyT &Key) const {
00120       typename MapTy::const_iterator It = Map.find(Key);
00121       if (It == Map.end()) return Vector.end();
00122       return Vector.begin() + It->second;
00123     }
00124 
00125     /// This is similar to erase, but instead of removing the element from the
00126     /// vector, it just zeros out the key in the vector. This leaves iterators
00127     /// intact, but clients must be prepared for zeroed-out keys when iterating.
00128     void blot(const KeyT &Key) {
00129       typename MapTy::iterator It = Map.find(Key);
00130       if (It == Map.end()) return;
00131       Vector[It->second].first = KeyT();
00132       Map.erase(It);
00133     }
00134 
00135     void clear() {
00136       Map.clear();
00137       Vector.clear();
00138     }
00139   };
00140 }
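// A minimal usage sketch of the blot operation above (illustrative only; the
// pointers and the Use() helper are hypothetical). Blotting zeroes the key in
// place instead of erasing the entry, so existing iterators stay valid and
// clients simply skip null keys while iterating:
//
//   MapVector<const Value *, unsigned> MV;
//   MV[PtrA] = 1;
//   MV[PtrB] = 2;
//   MV.blot(PtrA);                // PtrA's slot remains, but its key is now null.
//   for (const auto &KV : MV)
//     if (KV.first)               // Clients must be prepared for blotted keys.
//       Use(KV.first, KV.second);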
00141 
00142 /// @}
00143 ///
00144 /// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
00145 /// @{
00146 
00147 /// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
00148 /// as it finds a value with multiple uses.
00149 static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
00150   if (Arg->hasOneUse()) {
00151     if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
00152       return FindSingleUseIdentifiedObject(BC->getOperand(0));
00153     if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
00154       if (GEP->hasAllZeroIndices())
00155         return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
00156     if (IsForwarding(GetBasicInstructionClass(Arg)))
00157       return FindSingleUseIdentifiedObject(
00158                cast<CallInst>(Arg)->getArgOperand(0));
00159     if (!IsObjCIdentifiedObject(Arg))
00160       return nullptr;
00161     return Arg;
00162   }
00163 
00164   // If we found an identifiable object but it has multiple uses, and those
00165   // uses are all trivial, we can still consider this to be a single-use value.
00166   if (IsObjCIdentifiedObject(Arg)) {
00167     for (const User *U : Arg->users())
00168       if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
00169          return nullptr;
00170 
00171     return Arg;
00172   }
00173 
00174   return nullptr;
00175 }
00176 
00177 /// This is a wrapper around getUnderlyingObjCPtr along the lines of
00178 /// GetUnderlyingObjects except that it returns early when it sees the first
00179 /// alloca.
00180 static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
00181   SmallPtrSet<const Value *, 4> Visited;
00182   SmallVector<const Value *, 4> Worklist;
00183   Worklist.push_back(V);
00184   do {
00185     const Value *P = Worklist.pop_back_val();
00186     P = GetUnderlyingObjCPtr(P);
00187 
00188     if (isa<AllocaInst>(P))
00189       return true;
00190 
00191     if (!Visited.insert(P))
00192       continue;
00193 
00194     if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
00195       Worklist.push_back(SI->getTrueValue());
00196       Worklist.push_back(SI->getFalseValue());
00197       continue;
00198     }
00199 
00200     if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
00201       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
00202         Worklist.push_back(PN->getIncomingValue(i));
00203       continue;
00204     }
00205   } while (!Worklist.empty());
00206 
00207   return false;
00208 }
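// For example (hypothetical IR), given
//
//   %buf = alloca i8
//   %p = select i1 %cond, i8* %obj, i8* %buf
//
// AreAnyUnderlyingObjectsAnAlloca(%p) looks through the select, finds that one
// of the underlying objects is an alloca, and returns true.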
00209 
00210 
00211 /// @}
00212 ///
00213 /// \defgroup ARCOpt ARC Optimization.
00214 /// @{
00215 
00216 // TODO: On code like this:
00217 //
00218 // objc_retain(%x)
00219 // stuff_that_cannot_release()
00220 // objc_autorelease(%x)
00221 // stuff_that_cannot_release()
00222 // objc_retain(%x)
00223 // stuff_that_cannot_release()
00224 // objc_autorelease(%x)
00225 //
00226 // The second retain and autorelease can be deleted.
00227 
00228 // TODO: It should be possible to delete
00229 // objc_autoreleasePoolPush and objc_autoreleasePoolPop
00230 // pairs if nothing is actually autoreleased between them. Also, autorelease
00231 // calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
00232 // after inlining) can be turned into plain release calls.
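// For illustration, the push/pop pattern described above looks like:
//
// %pool = objc_autoreleasePoolPush()
// stuff_that_does_not_autorelease()
// objc_autoreleasePoolPop(%pool)
//
// and the push/pop pair could be deleted outright.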
00233 
00234 // TODO: Critical-edge splitting. If the optimal insertion point is
00235 // a critical edge, the current algorithm has to fail, because it doesn't
00236 // know how to split edges. It should be possible to make the optimizer
00237 // think in terms of edges, rather than blocks, and then split critical
00238 // edges on demand.
00239 
00240 // TODO: OptimizeSequences could be generalized to be interprocedural.
00241 
00242 // TODO: Recognize that a bunch of other objc runtime calls have
00243 // non-escaping arguments and non-releasing arguments, and may be
00244 // non-autoreleasing.
00245 
00246 // TODO: Sink autorelease calls as far as possible. Unfortunately we
00247 // usually can't sink them past other calls, which would be the main
00248 // case where it would be useful.
00249 
00250 // TODO: The pointer returned from objc_loadWeakRetained is retained.
00251 
00252 // TODO: Delete release+retain pairs (rare).
00253 
00254 STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
00255 STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
00256 STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
00257 STATISTIC(NumRets,        "Number of return value forwarding "
00258                           "retain+autoreleases eliminated");
00259 STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
00260 STATISTIC(NumPeeps,       "Number of calls peephole-optimized");
00261 #ifndef NDEBUG
00262 STATISTIC(NumRetainsBeforeOpt,
00263           "Number of retains before optimization");
00264 STATISTIC(NumReleasesBeforeOpt,
00265           "Number of releases before optimization");
00266 STATISTIC(NumRetainsAfterOpt,
00267           "Number of retains after optimization");
00268 STATISTIC(NumReleasesAfterOpt,
00269           "Number of releases after optimization");
00270 #endif
00271 
00272 namespace {
00273   /// \enum Sequence
00274   ///
00275   /// \brief A sequence of states that a pointer may go through in which an
00276   /// objc_retain and objc_release are actually needed.
00277   enum Sequence {
00278     S_None,
00279     S_Retain,         ///< objc_retain(x).
00280     S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement.
00281     S_Use,            ///< any use of x.
00282     S_Stop,           ///< like S_Release, but code motion is stopped.
00283     S_Release,        ///< objc_release(x).
00284     S_MovableRelease  ///< objc_release(x), !clang.imprecise_release.
00285   };
00286 
00287   raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
00288     LLVM_ATTRIBUTE_UNUSED;
00289   raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
00290     switch (S) {
00291     case S_None:
00292       return OS << "S_None";
00293     case S_Retain:
00294       return OS << "S_Retain";
00295     case S_CanRelease:
00296       return OS << "S_CanRelease";
00297     case S_Use:
00298       return OS << "S_Use";
00299     case S_Release:
00300       return OS << "S_Release";
00301     case S_MovableRelease:
00302       return OS << "S_MovableRelease";
00303     case S_Stop:
00304       return OS << "S_Stop";
00305     }
00306     llvm_unreachable("Unknown sequence type.");
00307   }
00308 }
00309 
00310 static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
00311   // The easy cases.
00312   if (A == B)
00313     return A;
00314   if (A == S_None || B == S_None)
00315     return S_None;
00316 
00317   if (A > B) std::swap(A, B);
00318   if (TopDown) {
00319     // Choose the side which is further along in the sequence.
00320     if ((A == S_Retain || A == S_CanRelease) &&
00321         (B == S_CanRelease || B == S_Use))
00322       return B;
00323   } else {
00324     // Choose the side which is further along in the sequence.
00325     if ((A == S_Use || A == S_CanRelease) &&
00326         (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
00327       return A;
00328     // If both sides are releases, choose the more conservative one.
00329     if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
00330       return A;
00331     if (A == S_Release && B == S_MovableRelease)
00332       return A;
00333   }
00334 
00335   return S_None;
00336 }
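// A few illustrative merges derived from the rules above (argument order does
// not matter because the smaller value is normalized to A first):
//
//   MergeSeqs(S_Retain, S_Use,          /*TopDown=*/true)  == S_Use   (further along)
//   MergeSeqs(S_Use, S_Release,         /*TopDown=*/false) == S_Use   (further along)
//   MergeSeqs(S_Stop, S_MovableRelease, /*TopDown=*/false) == S_Stop  (more conservative)
//   MergeSeqs(S_Retain, S_None,         /*TopDown=*/true)  == S_None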
00337 
00338 namespace {
00339   /// \brief Unidirectional information about either a
00340   /// retain-decrement-use-release sequence or release-use-decrement-retain
00341   /// reverse sequence.
00342   struct RRInfo {
00343     /// After an objc_retain, the reference count of the referenced
00344     /// object is known to be positive. Similarly, before an objc_release, the
00345     /// reference count of the referenced object is known to be positive. If
00346     /// there are retain-release pairs in code regions where the retain count
00347     /// is known to be positive, they can be eliminated, regardless of any side
00348     /// effects between them.
00349     ///
00350     /// Also, a retain+release pair nested within another retain+release
00351     /// pair, all on the same known pointer value, can be eliminated, regardless
00352     /// of any intervening side effects.
00353     ///
00354     /// KnownSafe is true when either of these conditions is satisfied.
00355     bool KnownSafe;
00356 
00357     /// True if the objc_release calls are all marked with the "tail" keyword.
00358     bool IsTailCallRelease;
00359 
00360     /// If the Calls are objc_release calls and they all have a
00361     /// clang.imprecise_release tag, this is the metadata tag.
00362     MDNode *ReleaseMetadata;
00363 
00364     /// For a top-down sequence, the set of objc_retains or
00365     /// objc_retainBlocks. For bottom-up, the set of objc_releases.
00366     SmallPtrSet<Instruction *, 2> Calls;
00367 
00368     /// The set of optimal insert positions for moving calls in the opposite
00369     /// sequence.
00370     SmallPtrSet<Instruction *, 2> ReverseInsertPts;
00371 
00372     /// If this is true, we cannot perform code motion but can still remove
00373     /// retain/release pairs.
00374     bool CFGHazardAfflicted;
00375 
00376     RRInfo() :
00377       KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
00378       CFGHazardAfflicted(false) {}
00379 
00380     void clear();
00381 
00382     /// Conservatively merge the two RRInfo. Returns true if a partial merge has
00383     /// occurred, false otherwise.
00384     bool Merge(const RRInfo &Other);
00385 
00386   };
00387 }
00388 
00389 void RRInfo::clear() {
00390   KnownSafe = false;
00391   IsTailCallRelease = false;
00392   ReleaseMetadata = nullptr;
00393   Calls.clear();
00394   ReverseInsertPts.clear();
00395   CFGHazardAfflicted = false;
00396 }
00397 
00398 bool RRInfo::Merge(const RRInfo &Other) {
00399     // Conservatively merge the ReleaseMetadata information.
00400     if (ReleaseMetadata != Other.ReleaseMetadata)
00401       ReleaseMetadata = nullptr;
00402 
00403     // Conservatively merge the boolean state.
00404     KnownSafe &= Other.KnownSafe;
00405     IsTailCallRelease &= Other.IsTailCallRelease;
00406     CFGHazardAfflicted |= Other.CFGHazardAfflicted;
00407 
00408     // Merge the call sets.
00409     Calls.insert(Other.Calls.begin(), Other.Calls.end());
00410 
00411     // Merge the insert point sets. If there are any differences,
00412     // that makes this a partial merge.
00413     bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
00414     for (Instruction *Inst : Other.ReverseInsertPts)
00415       Partial |= ReverseInsertPts.insert(Inst);
00416     return Partial;
00417 }
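// For example, if this RRInfo has ReverseInsertPts == {A} and Other has {B}
// (hypothetical instructions), the merged set becomes {A, B} and Merge()
// returns true: the insert-point sets differed, so only a partial merge was
// possible, and PtrState::Merge below records that fact so a later merge can
// conservatively drop the sequence rather than attempt partial RR elimination.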
00418 
00419 namespace {
00420   /// \brief This class summarizes several per-pointer runtime properties which
00421   /// are propagated through the flow graph.
00422   class PtrState {
00423     /// True if the reference count is known to be incremented.
00424     bool KnownPositiveRefCount;
00425 
00426     /// True if we've seen an opportunity for partial RR elimination, such as
00427     /// pushing calls into a CFG triangle or into one side of a CFG diamond.
00428     bool Partial;
00429 
00430     /// The current position in the sequence.
00431     unsigned char Seq : 8;
00432 
00433     /// Unidirectional information about the current sequence.
00434     RRInfo RRI;
00435 
00436   public:
00437     PtrState() : KnownPositiveRefCount(false), Partial(false),
00438                  Seq(S_None) {}
00439 
00440 
00441     bool IsKnownSafe() const {
00442       return RRI.KnownSafe;
00443     }
00444 
00445     void SetKnownSafe(const bool NewValue) {
00446       RRI.KnownSafe = NewValue;
00447     }
00448 
00449     bool IsTailCallRelease() const {
00450       return RRI.IsTailCallRelease;
00451     }
00452 
00453     void SetTailCallRelease(const bool NewValue) {
00454       RRI.IsTailCallRelease = NewValue;
00455     }
00456 
00457     bool IsTrackingImpreciseReleases() const {
00458       return RRI.ReleaseMetadata != nullptr;
00459     }
00460 
00461     const MDNode *GetReleaseMetadata() const {
00462       return RRI.ReleaseMetadata;
00463     }
00464 
00465     void SetReleaseMetadata(MDNode *NewValue) {
00466       RRI.ReleaseMetadata = NewValue;
00467     }
00468 
00469     bool IsCFGHazardAfflicted() const {
00470       return RRI.CFGHazardAfflicted;
00471     }
00472 
00473     void SetCFGHazardAfflicted(const bool NewValue) {
00474       RRI.CFGHazardAfflicted = NewValue;
00475     }
00476 
00477     void SetKnownPositiveRefCount() {
00478       DEBUG(dbgs() << "Setting Known Positive.\n");
00479       KnownPositiveRefCount = true;
00480     }
00481 
00482     void ClearKnownPositiveRefCount() {
00483       DEBUG(dbgs() << "Clearing Known Positive.\n");
00484       KnownPositiveRefCount = false;
00485     }
00486 
00487     bool HasKnownPositiveRefCount() const {
00488       return KnownPositiveRefCount;
00489     }
00490 
00491     void SetSeq(Sequence NewSeq) {
00492       DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
00493       Seq = NewSeq;
00494     }
00495 
00496     Sequence GetSeq() const {
00497       return static_cast<Sequence>(Seq);
00498     }
00499 
00500     void ClearSequenceProgress() {
00501       ResetSequenceProgress(S_None);
00502     }
00503 
00504     void ResetSequenceProgress(Sequence NewSeq) {
00505       DEBUG(dbgs() << "Resetting sequence progress.\n");
00506       SetSeq(NewSeq);
00507       Partial = false;
00508       RRI.clear();
00509     }
00510 
00511     void Merge(const PtrState &Other, bool TopDown);
00512 
00513     void InsertCall(Instruction *I) {
00514       RRI.Calls.insert(I);
00515     }
00516 
00517     void InsertReverseInsertPt(Instruction *I) {
00518       RRI.ReverseInsertPts.insert(I);
00519     }
00520 
00521     void ClearReverseInsertPts() {
00522       RRI.ReverseInsertPts.clear();
00523     }
00524 
00525     bool HasReverseInsertPts() const {
00526       return !RRI.ReverseInsertPts.empty();
00527     }
00528 
00529     const RRInfo &GetRRInfo() const {
00530       return RRI;
00531     }
00532   };
00533 }
00534 
00535 void
00536 PtrState::Merge(const PtrState &Other, bool TopDown) {
00537   Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
00538   KnownPositiveRefCount &= Other.KnownPositiveRefCount;
00539 
00540   // If we're not in a sequence (anymore), drop all associated state.
00541   if (Seq == S_None) {
00542     Partial = false;
00543     RRI.clear();
00544   } else if (Partial || Other.Partial) {
00545     // If we're doing a merge on a path that's previously seen a partial
00546     // merge, conservatively drop the sequence, to avoid doing partial
00547     // RR elimination. If the branch predicates for the two merges differ,
00548     // mixing them is unsafe.
00549     ClearSequenceProgress();
00550   } else {
00551     // Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
00552     // point, we know that currently we are not partial. Stash whether or not
00553     // the merge operation caused us to undergo a partial merging of reverse
00554     // insertion points.
00555     Partial = RRI.Merge(Other.RRI);
00556   }
00557 }
00558 
00559 namespace {
00560   /// \brief Per-BasicBlock state.
00561   class BBState {
00562     /// The number of unique control paths from the entry which can reach this
00563     /// block.
00564     unsigned TopDownPathCount;
00565 
00566     /// The number of unique control paths to exits from this block.
00567     unsigned BottomUpPathCount;
00568 
00569     /// A type for PerPtrTopDown and PerPtrBottomUp.
00570     typedef MapVector<const Value *, PtrState> MapTy;
00571 
00572     /// The top-down traversal uses this to record information known about a
00573     /// pointer at the bottom of each block.
00574     MapTy PerPtrTopDown;
00575 
00576     /// The bottom-up traversal uses this to record information known about a
00577     /// pointer at the top of each block.
00578     MapTy PerPtrBottomUp;
00579 
00580     /// Effective predecessors of the current block ignoring ignorable edges and
00581     /// ignored backedges.
00582     SmallVector<BasicBlock *, 2> Preds;
00583     /// Effective successors of the current block ignoring ignorable edges and
00584     /// ignored backedges.
00585     SmallVector<BasicBlock *, 2> Succs;
00586 
00587   public:
00588     static const unsigned OverflowOccurredValue;
00589 
00590     BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }
00591 
00592     typedef MapTy::iterator ptr_iterator;
00593     typedef MapTy::const_iterator ptr_const_iterator;
00594 
00595     ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
00596     ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
00597     ptr_const_iterator top_down_ptr_begin() const {
00598       return PerPtrTopDown.begin();
00599     }
00600     ptr_const_iterator top_down_ptr_end() const {
00601       return PerPtrTopDown.end();
00602     }
00603 
00604     ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
00605     ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
00606     ptr_const_iterator bottom_up_ptr_begin() const {
00607       return PerPtrBottomUp.begin();
00608     }
00609     ptr_const_iterator bottom_up_ptr_end() const {
00610       return PerPtrBottomUp.end();
00611     }
00612 
00613     /// Mark this block as being an entry block, which has one path from the
00614     /// entry by definition.
00615     void SetAsEntry() { TopDownPathCount = 1; }
00616 
00617     /// Mark this block as being an exit block, which has one path to an exit by
00618     /// definition.
00619     void SetAsExit()  { BottomUpPathCount = 1; }
00620 
00621     /// Attempt to find the PtrState object describing the top down state for
00622     /// pointer Arg. Return a new initialized PtrState describing the top down
00623     /// state for Arg if we do not find one.
00624     PtrState &getPtrTopDownState(const Value *Arg) {
00625       return PerPtrTopDown[Arg];
00626     }
00627 
00628     /// Attempt to find the PtrState object describing the bottom up state for
00629     /// pointer Arg. Return a new initialized PtrState describing the bottom up
00630     /// state for Arg if we do not find one.
00631     PtrState &getPtrBottomUpState(const Value *Arg) {
00632       return PerPtrBottomUp[Arg];
00633     }
00634 
00635     /// Attempt to find the PtrState object describing the bottom up state for
00636     /// pointer Arg.
00637     ptr_iterator findPtrBottomUpState(const Value *Arg) {
00638       return PerPtrBottomUp.find(Arg);
00639     }
00640 
00641     void clearBottomUpPointers() {
00642       PerPtrBottomUp.clear();
00643     }
00644 
00645     void clearTopDownPointers() {
00646       PerPtrTopDown.clear();
00647     }
00648 
00649     void InitFromPred(const BBState &Other);
00650     void InitFromSucc(const BBState &Other);
00651     void MergePred(const BBState &Other);
00652     void MergeSucc(const BBState &Other);
00653 
00654     /// Compute the number of possible unique paths from an entry to an exit
00655     /// which pass through this block. This is only valid after both the
00656     /// top-down and bottom-up traversals are complete.
00657     ///
00658     /// Returns true if overflow occurred. Returns false if overflow did not
00659     /// occur.
00660     bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
00661       if (TopDownPathCount == OverflowOccurredValue ||
00662           BottomUpPathCount == OverflowOccurredValue)
00663         return true;
00664       unsigned long long Product =
00665         (unsigned long long)TopDownPathCount*BottomUpPathCount;
00666       // Overflow occurred if any of the upper bits of Product are set or if
00667       // all of the lower bits of Product are set.
00668       return (Product >> 32) ||
00669              ((PathCount = Product) == OverflowOccurredValue);
00670     }
00671 
00672     // Specialized CFG utilities.
00673     typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
00674     edge_iterator pred_begin() const { return Preds.begin(); }
00675     edge_iterator pred_end() const { return Preds.end(); }
00676     edge_iterator succ_begin() const { return Succs.begin(); }
00677     edge_iterator succ_end() const { return Succs.end(); }
00678 
00679     void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
00680     void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }
00681 
00682     bool isExit() const { return Succs.empty(); }
00683   };
00684 
00685   const unsigned BBState::OverflowOccurredValue = 0xffffffff;
00686 }
00687 
00688 void BBState::InitFromPred(const BBState &Other) {
00689   PerPtrTopDown = Other.PerPtrTopDown;
00690   TopDownPathCount = Other.TopDownPathCount;
00691 }
00692 
00693 void BBState::InitFromSucc(const BBState &Other) {
00694   PerPtrBottomUp = Other.PerPtrBottomUp;
00695   BottomUpPathCount = Other.BottomUpPathCount;
00696 }
00697 
00698 /// The top-down traversal uses this to merge information about predecessors to
00699 /// form the initial state for a new block.
00700 void BBState::MergePred(const BBState &Other) {
00701   if (TopDownPathCount == OverflowOccurredValue)
00702     return;
00703 
00704   // Other.TopDownPathCount can be 0, in which case it is either dead or a
00705   // loop backedge. Loop backedges are special.
00706   TopDownPathCount += Other.TopDownPathCount;
00707 
00708   // In order to be consistent, we clear the top down pointers when the
00709   // addition causes TopDownPathCount to become OverflowOccurredValue even
00710   // though "true" overflow has not occurred.
00711   if (TopDownPathCount == OverflowOccurredValue) {
00712     clearTopDownPointers();
00713     return;
00714   }
00715 
00716   // Check for overflow. If we have overflow, fall back to conservative
00717   // behavior.
00718   if (TopDownPathCount < Other.TopDownPathCount) {
00719     TopDownPathCount = OverflowOccurredValue;
00720     clearTopDownPointers();
00721     return;
00722   }
00723 
00724   // For each entry in the other set, if our set has an entry with the same key,
00725   // merge the entries. Otherwise, copy the entry and merge it with an empty
00726   // entry.
00727   for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
00728        ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
00729     std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
00730     Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
00731                              /*TopDown=*/true);
00732   }
00733 
00734   // For each entry in our set, if the other set doesn't have an entry with the
00735   // same key, force it to merge with an empty entry.
00736   for (ptr_iterator MI = top_down_ptr_begin(),
00737        ME = top_down_ptr_end(); MI != ME; ++MI)
00738     if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
00739       MI->second.Merge(PtrState(), /*TopDown=*/true);
00740 }
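// As a concrete example, in a simple if/else diamond the entry block has
// TopDownPathCount == 1, each arm inherits 1 from its predecessor, and the
// join block merges to 1 + 1 == 2. If the join block is also an exit, its
// BottomUpPathCount is 1, so GetAllPathCountWithOverflow later yields
// 2 * 1 == 2 unique entry-to-exit paths through it.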
00741 
00742 /// The bottom-up traversal uses this to merge information about successors to
00743 /// form the initial state for a new block.
00744 void BBState::MergeSucc(const BBState &Other) {
00745   if (BottomUpPathCount == OverflowOccurredValue)
00746     return;
00747 
00748   // Other.BottomUpPathCount can be 0, in which case it is either dead or a
00749   // loop backedge. Loop backedges are special.
00750   BottomUpPathCount += Other.BottomUpPathCount;
00751 
00752   // In order to be consistent, we clear the bottom up pointers when the
00753   // addition causes BottomUpPathCount to become OverflowOccurredValue even
00754   // though "true" overflow has not occurred.
00755   if (BottomUpPathCount == OverflowOccurredValue) {
00756     clearBottomUpPointers();
00757     return;
00758   }
00759 
00760   // Check for overflow. If we have overflow, fall back to conservative
00761   // behavior.
00762   if (BottomUpPathCount < Other.BottomUpPathCount) {
00763     BottomUpPathCount = OverflowOccurredValue;
00764     clearBottomUpPointers();
00765     return;
00766   }
00767 
00768   // For each entry in the other set, if our set has an entry with the
00769   // same key, merge the entries. Otherwise, copy the entry and merge
00770   // it with an empty entry.
00771   for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
00772        ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
00773     std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
00774     Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
00775                              /*TopDown=*/false);
00776   }
00777 
00778   // For each entry in our set, if the other set doesn't have an entry
00779   // with the same key, force it to merge with an empty entry.
00780   for (ptr_iterator MI = bottom_up_ptr_begin(),
00781        ME = bottom_up_ptr_end(); MI != ME; ++MI)
00782     if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
00783       MI->second.Merge(PtrState(), /*TopDown=*/false);
00784 }
00785 
00786 // Only enable ARC Annotations if we are building a debug version of
00787 // libObjCARCOpts.
00788 #ifndef NDEBUG
00789 #define ARC_ANNOTATIONS
00790 #endif
00791 
00792 // Define some macros along the lines of DEBUG and some helper functions to make
00793 // it cleaner to create annotations in the source code and to no-op when not
00794 // building in debug mode.
00795 #ifdef ARC_ANNOTATIONS
00796 
00797 #include "llvm/Support/CommandLine.h"
00798 
00799 /// Enable/disable ARC sequence annotations.
00800 static cl::opt<bool>
00801 EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
00802                      cl::desc("Enable emission of arc data flow analysis "
00803                               "annotations"));
00804 static cl::opt<bool>
00805 DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
00806                           cl::desc("Disable check for cfg hazards when "
00807                                    "annotating"));
00808 static cl::opt<std::string>
00809 ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
00810                               cl::init(""),
00811                               cl::desc("filter out all data flow annotations "
00812                                        "but those that apply to the given "
00813                                        "target llvm identifier."));
00814 
00815 /// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
00816 /// instruction so that we can track backwards when post-processing via the llvm
00817 /// arc annotation processor tool.
00818 static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
00819                                          Value *Ptr) {
00820   MDString *Hash = nullptr;
00821 
00822   // If the pointer is the result of an instruction and it does not have a
00823   // source MDNode attached to it, attach a new MDNode onto it. If the pointer
00824   // is the result of an instruction and does have a source MDNode attached to
00825   // it, return a reference to said Node. Otherwise just return nullptr.
00826   if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
00827     MDNode *Node;
00828     if (!(Node = Inst->getMetadata(NodeId))) {
00829       // We do not have any node. Generate and attach the hash MDString to the
00830       // instruction.
00831 
00832       // We just use an MDString to ensure that this metadata gets written out
00833       // of line at the module level and to provide a very simple format
00834       // encoding the information herein. Both of these make it simpler to
00835       // parse the annotations by a simple external program.
00836       std::string Str;
00837       raw_string_ostream os(Str);
00838       os << "(" << Inst->getParent()->getParent()->getName() << ",%"
00839          << Inst->getName() << ")";
00840 
00841       Hash = MDString::get(Inst->getContext(), os.str());
00842       Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(),Hash));
00843     } else {
00844       // We have a node. Grab its hash and return it.
00845       assert(Node->getNumOperands() == 1 &&
00846         "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
00847       Hash = cast<MDString>(Node->getOperand(0));
00848     }
00849   } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
00850     std::string str;
00851     raw_string_ostream os(str);
00852     os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
00853        << ")";
00854     Hash = MDString::get(Arg->getContext(), os.str());
00855   }
00856 
00857   return Hash;
00858 }
00859 
00860 static std::string SequenceToString(Sequence A) {
00861   std::string str;
00862   raw_string_ostream os(str);
00863   os << A;
00864   return os.str();
00865 }
00866 
00867 /// Helper function to change a Sequence into a String object using our overload
00868 /// for raw_ostream so we only have printing code in one location.
00869 static MDString *SequenceToMDString(LLVMContext &Context,
00870                                     Sequence A) {
00871   return MDString::get(Context, SequenceToString(A));
00872 }
00873 
00874 /// A simple function to generate an MDNode which describes the change in state
00875 /// for Value *Ptr caused by Instruction *Inst.
00876 static void AppendMDNodeToInstForPtr(unsigned NodeId,
00877                                      Instruction *Inst,
00878                                      Value *Ptr,
00879                                      MDString *PtrSourceMDNodeID,
00880                                      Sequence OldSeq,
00881                                      Sequence NewSeq) {
00882   MDNode *Node = nullptr;
00883   Value *tmp[3] = {PtrSourceMDNodeID,
00884                    SequenceToMDString(Inst->getContext(),
00885                                       OldSeq),
00886                    SequenceToMDString(Inst->getContext(),
00887                                       NewSeq)};
00888   Node = MDNode::get(Inst->getContext(), tmp);
00889 
00890   Inst->setMetadata(NodeId, Node);
00891 }
00892 
00893 /// Add llvm.ptr.annotations to the beginning of the basic block which show the
00894 /// state of a pointer at the entrance to the basic block.
00895 static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
00896                                             Value *Ptr, Sequence Seq) {
00897   // If we have a target identifier, make sure that we match it before
00898   // continuing.
00899   if(!ARCAnnotationTargetIdentifier.empty() &&
00900      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
00901     return;
00902 
00903   Module *M = BB->getParent()->getParent();
00904   LLVMContext &C = M->getContext();
00905   Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
00906   Type *I8XX = PointerType::getUnqual(I8X);
00907   Type *Params[] = {I8XX, I8XX};
00908   FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
00909                                         /*isVarArg=*/false);
00910   Constant *Callee = M->getOrInsertFunction(Name, FTy);
00911 
00912   IRBuilder<> Builder(BB, BB->getFirstInsertionPt());
00913 
00914   Value *PtrName;
00915   StringRef Tmp = Ptr->getName();
00916   if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
00917     Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
00918                                                          Tmp + "_STR");
00919     PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
00920                                  cast<Constant>(ActualPtrName), Tmp);
00921   }
00922 
00923   Value *S;
00924   std::string SeqStr = SequenceToString(Seq);
00925   if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
00926     Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
00927                                                          SeqStr + "_STR");
00928     S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
00929                            cast<Constant>(ActualPtrName), SeqStr);
00930   }
00931 
00932   Builder.CreateCall2(Callee, PtrName, S);
00933 }
00934 
00935 /// Add llvm.ptr.annotations to the end of the basic block which show the state
00936 /// of the pointer at the bottom of the basic block.
00937 static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
00938                                               Value *Ptr, Sequence Seq) {
00939   // If we have a target identifier, make sure that we match it before emitting
00940   // an annotation.
00941   if(!ARCAnnotationTargetIdentifier.empty() &&
00942      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
00943     return;
00944 
00945   Module *M = BB->getParent()->getParent();
00946   LLVMContext &C = M->getContext();
00947   Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
00948   Type *I8XX = PointerType::getUnqual(I8X);
00949   Type *Params[] = {I8XX, I8XX};
00950   FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
00951                                         /*isVarArg=*/false);
00952   Constant *Callee = M->getOrInsertFunction(Name, FTy);
00953 
00954   IRBuilder<> Builder(BB, std::prev(BB->end()));
00955 
00956   Value *PtrName;
00957   StringRef Tmp = Ptr->getName();
00958   if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
00959     Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
00960                                                          Tmp + "_STR");
00961     PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
00962                                  cast<Constant>(ActualPtrName), Tmp);
00963   }
00964 
00965   Value *S;
00966   std::string SeqStr = SequenceToString(Seq);
00967   if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
00968     Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
00969                                                          SeqStr + "_STR");
00970     S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
00971                            cast<Constant>(ActualPtrName), SeqStr);
00972   }
00973   Builder.CreateCall2(Callee, PtrName, S);
00974 }
00975 
00976 /// Adds a source annotation to pointer and a state change annotation to Inst
00977 /// referencing the source annotation and the old/new state of pointer.
00978 static void GenerateARCAnnotation(unsigned InstMDId,
00979                                   unsigned PtrMDId,
00980                                   Instruction *Inst,
00981                                   Value *Ptr,
00982                                   Sequence OldSeq,
00983                                   Sequence NewSeq) {
00984   if (EnableARCAnnotations) {
00985     // If we have a target identifier, make sure that we match it before
00986     // emitting an annotation.
00987     if(!ARCAnnotationTargetIdentifier.empty() &&
00988        !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
00989       return;
00990 
00991     // First generate the source annotation on our pointer. This will return an
00992     // MDString* if Ptr actually comes from an instruction implying we can put
00993     // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
00994     // then we know that our pointer is from an Argument so we put a reference
00995     // to the argument number.
00996     //
00997     // The point of this is to make it easy for the
00998     // llvm-arc-annotation-processor tool to cross reference where the source
00999     // pointer is in the LLVM IR since the LLVM IR parser does not submit such
01000     // information via debug info for backends to use (since why would anyone
01001     // need such a thing from LLVM IR besides in non-standard cases
01002     // [i.e. this]).
01003     MDString *SourcePtrMDNode =
01004       AppendMDNodeToSourcePtr(PtrMDId, Ptr);
01005     AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
01006                              NewSeq);
01007   }
01008 }
01009 
01010 // The actual interface for accessing the above functionality is defined via
01011 // some simple macros which are defined below. We do this so that the user does
01012 // not need to pass in the required metadata id, resulting in cleaner code, and
01013 // because it provides an easy way to conditionally no-op all annotation
01014 // support in a non-debug build.
01015 
01016 /// Use this macro to annotate a sequence state change when processing
01017 /// instructions bottom up.
01018 #define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
01019   GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
01020                         ARCAnnotationProvenanceSourceMDKind, (inst),    \
01021                         const_cast<Value*>(ptr), (old), (new))
01022 /// Use this macro to annotate a sequence state change when processing
01023 /// instructions top down.
01024 #define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
01025   GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
01026                         ARCAnnotationProvenanceSourceMDKind, (inst),    \
01027                         const_cast<Value*>(ptr), (old), (new))
01028 
01029 #define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
01030   do {                                                                        \
01031     if (EnableARCAnnotations) {                                               \
01032       for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \
01033           E = (_states)._direction##_ptr_end(); I != E; ++I) {                \
01034         Value *Ptr = const_cast<Value*>(I->first);                            \
01035         Sequence Seq = I->second.GetSeq();                                    \
01036         GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
01037       }                                                                       \
01038     }                                                                         \
01039   } while (0)
01040 
01041 #define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
01042     ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart", \
01043                 Entrance, bottom_up)
01044 #define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
01045     ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",   \
01046                 Terminator, bottom_up)
01047 #define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
01048     ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",  \
01049                 Entrance, top_down)
01050 #define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
01051     ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",    \
01052                 Terminator, top_down)
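// For example (hypothetical call sites inside ObjCARCOpt's visitor methods,
// where the metadata-kind members are in scope), the macros are used as:
//
//   ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, NewSeq);
//   ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
//
// which expand to GenerateARCAnnotation / GenerateARCBBEntranceAnnotation
// calls when ARC_ANNOTATIONS is defined, and to nothing otherwise.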
01053 
01054 #else // !ARC_ANNOTATIONS
01055 // If annotations are off, noop.
01056 #define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
01057 #define ANNOTATE_TOPDOWN(inst, ptr, old, new)
01058 #define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
01059 #define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
01060 #define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
01061 #define ANNOTATE_TOPDOWN_BBEND(states, basicblock)
01062 #endif // !ARC_ANNOTATIONS
01063 
01064 namespace {
01065   /// \brief The main ARC optimization pass.
01066   class ObjCARCOpt : public FunctionPass {
01067     bool Changed;
01068     ProvenanceAnalysis PA;
01069     ARCRuntimeEntryPoints EP;
01070 
01071     // This is used to track if a pointer is stored into an alloca.
01072     DenseSet<const Value *> MultiOwnersSet;
01073 
01074     /// A flag indicating whether this optimization pass should run.
01075     bool Run;
01076 
01077     /// Flags which determine whether each of the interesting runtime functions
01078     /// is in fact used in the current function.
01079     unsigned UsedInThisFunction;
01080 
01081     /// The Metadata Kind for clang.imprecise_release metadata.
01082     unsigned ImpreciseReleaseMDKind;
01083 
01084     /// The Metadata Kind for clang.arc.copy_on_escape metadata.
01085     unsigned CopyOnEscapeMDKind;
01086 
01087     /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
01088     unsigned NoObjCARCExceptionsMDKind;
01089 
01090 #ifdef ARC_ANNOTATIONS
01091     /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
01092     unsigned ARCAnnotationBottomUpMDKind;
01093     /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
01094     unsigned ARCAnnotationTopDownMDKind;
01095     /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
01096     unsigned ARCAnnotationProvenanceSourceMDKind;
01097 #endif // ARC_ANNOTATIONS
01098 
01099     bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
01100     void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
01101                                    InstructionClass &Class);
01102     void OptimizeIndividualCalls(Function &F);
01103 
01104     void CheckForCFGHazards(const BasicBlock *BB,
01105                             DenseMap<const BasicBlock *, BBState> &BBStates,
01106                             BBState &MyStates) const;
01107     bool VisitInstructionBottomUp(Instruction *Inst,
01108                                   BasicBlock *BB,
01109                                   MapVector<Value *, RRInfo> &Retains,
01110                                   BBState &MyStates);
01111     bool VisitBottomUp(BasicBlock *BB,
01112                        DenseMap<const BasicBlock *, BBState> &BBStates,
01113                        MapVector<Value *, RRInfo> &Retains);
01114     bool VisitInstructionTopDown(Instruction *Inst,
01115                                  DenseMap<Value *, RRInfo> &Releases,
01116                                  BBState &MyStates);
01117     bool VisitTopDown(BasicBlock *BB,
01118                       DenseMap<const BasicBlock *, BBState> &BBStates,
01119                       DenseMap<Value *, RRInfo> &Releases);
01120     bool Visit(Function &F,
01121                DenseMap<const BasicBlock *, BBState> &BBStates,
01122                MapVector<Value *, RRInfo> &Retains,
01123                DenseMap<Value *, RRInfo> &Releases);
01124 
01125     void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
01126                    MapVector<Value *, RRInfo> &Retains,
01127                    DenseMap<Value *, RRInfo> &Releases,
01128                    SmallVectorImpl<Instruction *> &DeadInsts,
01129                    Module *M);
01130 
01131     bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
01132                                MapVector<Value *, RRInfo> &Retains,
01133                                DenseMap<Value *, RRInfo> &Releases,
01134                                Module *M,
01135                                SmallVectorImpl<Instruction *> &NewRetains,
01136                                SmallVectorImpl<Instruction *> &NewReleases,
01137                                SmallVectorImpl<Instruction *> &DeadInsts,
01138                                RRInfo &RetainsToMove,
01139                                RRInfo &ReleasesToMove,
01140                                Value *Arg,
01141                                bool KnownSafe,
01142                                bool &AnyPairsCompletelyEliminated);
01143 
01144     bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
01145                               MapVector<Value *, RRInfo> &Retains,
01146                               DenseMap<Value *, RRInfo> &Releases,
01147                               Module *M);
01148 
01149     void OptimizeWeakCalls(Function &F);
01150 
01151     bool OptimizeSequences(Function &F);
01152 
01153     void OptimizeReturns(Function &F);
01154 
01155 #ifndef NDEBUG
01156     void GatherStatistics(Function &F, bool AfterOptimization = false);
01157 #endif
01158 
01159     void getAnalysisUsage(AnalysisUsage &AU) const override;
01160     bool doInitialization(Module &M) override;
01161     bool runOnFunction(Function &F) override;
01162     void releaseMemory() override;
01163 
01164   public:
01165     static char ID;
01166     ObjCARCOpt() : FunctionPass(ID) {
01167       initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
01168     }
01169   };
01170 }
01171 
01172 char ObjCARCOpt::ID = 0;
01173 INITIALIZE_PASS_BEGIN(ObjCARCOpt,
01174                       "objc-arc", "ObjC ARC optimization", false, false)
01175 INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
01176 INITIALIZE_PASS_END(ObjCARCOpt,
01177                     "objc-arc", "ObjC ARC optimization", false, false)
01178 
01179 Pass *llvm::createObjCARCOptPass() {
01180   return new ObjCARCOpt();
01181 }
01182 
01183 void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
01184   AU.addRequired<ObjCARCAliasAnalysis>();
01185   AU.addRequired<AliasAnalysis>();
01186   // ARC optimization doesn't currently split critical edges.
01187   AU.setPreservesCFG();
01188 }
01189 
01190 /// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
01191 /// not a return value.  Or, if it can be paired with an
01192 /// objc_autoreleaseReturnValue, delete the pair and return true.
01193 bool
01194 ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
01195   // Check for the argument being from an immediately preceding call or invoke.
01196   const Value *Arg = GetObjCArg(RetainRV);
01197   ImmutableCallSite CS(Arg);
01198   if (const Instruction *Call = CS.getInstruction()) {
01199     if (Call->getParent() == RetainRV->getParent()) {
01200       BasicBlock::const_iterator I = Call;
01201       ++I;
01202       while (IsNoopInstruction(I)) ++I;
01203       if (&*I == RetainRV)
01204         return false;
01205     } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
01206       BasicBlock *RetainRVParent = RetainRV->getParent();
01207       if (II->getNormalDest() == RetainRVParent) {
01208         BasicBlock::const_iterator I = RetainRVParent->begin();
01209         while (IsNoopInstruction(I)) ++I;
01210         if (&*I == RetainRV)
01211           return false;
01212       }
01213     }
01214   }
01215 
01216   // Check for being preceded by an objc_autoreleaseReturnValue on the same
01217   // pointer. In this case, we can delete the pair.
01218   BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
01219   if (I != Begin) {
01220     do --I; while (I != Begin && IsNoopInstruction(I));
01221     if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
01222         GetObjCArg(I) == Arg) {
01223       Changed = true;
01224       ++NumPeeps;
01225 
01226       DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
01227                    << "Erasing " << *RetainRV << "\n");
01228 
01229       EraseInstruction(I);
01230       EraseInstruction(RetainRV);
01231       return true;
01232     }
01233   }
01234 
01235   // Turn it to a plain objc_retain.
01236   Changed = true;
01237   ++NumPeeps;
01238 
01239   DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
01240                   "objc_retain since the operand is not a return value.\n"
01241                   "Old = " << *RetainRV << "\n");
01242 
01243   Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
01244   cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
01245 
01246   DEBUG(dbgs() << "New = " << *RetainRV << "\n");
01247 
01248   return false;
01249 }
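// For illustration (in the style of the TODOs above), the pair erased by the
// second check is:
//
// objc_autoreleaseReturnValue(%x)
// objc_retainAutoreleasedReturnValue(%x)
//
// Both calls cancel out and are deleted. Otherwise, a retainRV whose operand
// is not the result of an immediately preceding call or invoke is demoted to
// a plain objc_retain.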
01250 
01251 /// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
01252 /// used as a return value.
01253 void
01254 ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
01255                                       InstructionClass &Class) {
01256   // Check for a return of the pointer value.
01257   const Value *Ptr = GetObjCArg(AutoreleaseRV);
01258   SmallVector<const Value *, 2> Users;
01259   Users.push_back(Ptr);
01260   do {
01261     Ptr = Users.pop_back_val();
01262     for (const User *U : Ptr->users()) {
01263       if (isa<ReturnInst>(U) || GetBasicInstructionClass(U) == IC_RetainRV)
01264         return;
01265       if (isa<BitCastInst>(U))
01266         Users.push_back(U);
01267     }
01268   } while (!Users.empty());
01269 
01270   Changed = true;
01271   ++NumPeeps;
01272 
01273   DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
01274                   "objc_autorelease since its operand is not used as a return "
01275                   "value.\n"
01276                   "Old = " << *AutoreleaseRV << "\n");
01277 
01278   CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
01279   Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
01280   AutoreleaseRVCI->setCalledFunction(NewDecl);
01281   AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
01282   Class = IC_Autorelease;
01283 
01284   DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
01285 
01286 }
01287 
01288 /// Visit each call, one at a time, and make simplifications without doing any
01289 /// additional analysis.
01290 void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
01291   DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
01292   // Reset all the flags in preparation for recomputing them.
01293   UsedInThisFunction = 0;
01294 
01295   // Visit all objc_* calls in F.
01296   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
01297     Instruction *Inst = &*I++;
01298 
01299     InstructionClass Class = GetBasicInstructionClass(Inst);
01300 
01301     DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
01302 
01303     switch (Class) {
01304     default: break;
01305 
01306     // Delete no-op casts. These function calls have special semantics, but
01307     // the semantics are entirely implemented via lowering in the front-end,
01308     // so by the time they reach the optimizer, they are just no-op calls
01309     // which return their argument.
01310     //
01311     // There are gray areas here, as the ability to cast reference-counted
01312     // pointers to raw void* and back allows code to break ARC assumptions,
01313     // however these are currently considered to be unimportant.
01314     case IC_NoopCast:
01315       Changed = true;
01316       ++NumNoops;
01317       DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
01318       EraseInstruction(Inst);
01319       continue;
01320 
01321     // If the pointer-to-weak-pointer is null, it's undefined behavior.
01322     case IC_StoreWeak:
01323     case IC_LoadWeak:
01324     case IC_LoadWeakRetained:
01325     case IC_InitWeak:
01326     case IC_DestroyWeak: {
01327       CallInst *CI = cast<CallInst>(Inst);
01328       if (IsNullOrUndef(CI->getArgOperand(0))) {
01329         Changed = true;
01330         Type *Ty = CI->getArgOperand(0)->getType();
01331         new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
01332                       Constant::getNullValue(Ty),
01333                       CI);
01334         llvm::Value *NewValue = UndefValue::get(CI->getType());
01335         DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
01336                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
01337         CI->replaceAllUsesWith(NewValue);
01338         CI->eraseFromParent();
01339         continue;
01340       }
01341       break;
01342     }
01343     case IC_CopyWeak:
01344     case IC_MoveWeak: {
01345       CallInst *CI = cast<CallInst>(Inst);
01346       if (IsNullOrUndef(CI->getArgOperand(0)) ||
01347           IsNullOrUndef(CI->getArgOperand(1))) {
01348         Changed = true;
01349         Type *Ty = CI->getArgOperand(0)->getType();
01350         new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
01351                       Constant::getNullValue(Ty),
01352                       CI);
01353 
01354         llvm::Value *NewValue = UndefValue::get(CI->getType());
01355         DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
01356                         "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
01357 
01358         CI->replaceAllUsesWith(NewValue);
01359         CI->eraseFromParent();
01360         continue;
01361       }
01362       break;
01363     }
01364     case IC_RetainRV:
01365       if (OptimizeRetainRVCall(F, Inst))
01366         continue;
01367       break;
01368     case IC_AutoreleaseRV:
01369       OptimizeAutoreleaseRVCall(F, Inst, Class);
01370       break;
01371     }
01372 
01373     // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
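          // A rough sketch of the intent: when x can be traced back to an
          // identified object with a single use, an autorelease whose own
          // result is unused can be replaced by an objc_release tagged with
          // clang.imprecise_release metadata, since nothing else can observe
          // the object in between.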
01374     if (IsAutorelease(Class) && Inst->use_empty()) {
01375       CallInst *Call = cast<CallInst>(Inst);
01376       const Value *Arg = Call->getArgOperand(0);
01377       Arg = FindSingleUseIdentifiedObject(Arg);
01378       if (Arg) {
01379         Changed = true;
01380         ++NumAutoreleases;
01381 
01382         // Create the declaration lazily.
01383         LLVMContext &C = Inst->getContext();
01384 
01385         Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
01386         CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
01387                                              Call);
01388         NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));
01389 
01390         DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
01391               "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
01392               << *NewCall << "\n");
01393 
01394         EraseInstruction(Call);
01395         Inst = NewCall;
01396         Class = IC_Release;
01397       }
01398     }
01399 
01400     // For functions which can never be passed stack arguments, add
01401     // the tail keyword.
01402     if (IsAlwaysTail(Class)) {
01403       Changed = true;
01404       DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
01405                       "passed stack args: " << *Inst << "\n");
01406       cast<CallInst>(Inst)->setTailCall();
01407     }
01408 
01409     // Ensure that functions that can never have a "tail" keyword due to the
01410     // semantics of ARC truly do not have one.
01411     if (IsNeverTail(Class)) {
01412       Changed = true;
01413       DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
01414             "\n");
01415       cast<CallInst>(Inst)->setTailCall(false);
01416     }
01417 
01418     // Set nounwind as needed.
01419     if (IsNoThrow(Class)) {
01420       Changed = true;
01421       DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
01422                    << "\n");
01423       cast<CallInst>(Inst)->setDoesNotThrow();
01424     }
01425 
01426     if (!IsNoopOnNull(Class)) {
01427       UsedInThisFunction |= 1 << Class;
01428       continue;
01429     }
01430 
01431     const Value *Arg = GetObjCArg(Inst);
01432 
01433     // ARC calls with null are no-ops. Delete them.
01434     if (IsNullOrUndef(Arg)) {
01435       Changed = true;
01436       ++NumNoops;
01437       DEBUG(dbgs() << "ARC calls with  null are no-ops. Erasing: " << *Inst
01438             << "\n");
01439       EraseInstruction(Inst);
01440       continue;
01441     }
01442 
01443     // Keep track of which of retain, release, autorelease, and retain_block
01444     // are actually present in this function.
01445     UsedInThisFunction |= 1 << Class;
01446 
01447     // If Arg is a PHI, and one or more incoming values to the
01448     // PHI are null, and the call is control-equivalent to the PHI, and there
01449     // are no relevant side effects between the PHI and the call, the call
01450     // could be pushed up to just those paths with non-null incoming values.
01451     // For now, don't bother splitting critical edges for this.
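          // Conceptually, given a PHI such as
          //   %p = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
          //   call void @objc_release(i8* %p)
          // the call can be cloned into %bb2 alone (the predecessor supplying
          // a non-null value) and the original erased, provided nothing
          // between the PHI and the call cares about the reference count
          // (checked below).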
01452     SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
01453     Worklist.push_back(std::make_pair(Inst, Arg));
01454     do {
01455       std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
01456       Inst = Pair.first;
01457       Arg = Pair.second;
01458 
01459       const PHINode *PN = dyn_cast<PHINode>(Arg);
01460       if (!PN) continue;
01461 
01462       // Determine if the PHI has any null operands, or any incoming
01463       // critical edges.
01464       bool HasNull = false;
01465       bool HasCriticalEdges = false;
01466       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
01467         Value *Incoming =
01468           StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
01469         if (IsNullOrUndef(Incoming))
01470           HasNull = true;
01471         else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
01472                    .getNumSuccessors() != 1) {
01473           HasCriticalEdges = true;
01474           break;
01475         }
01476       }
01477       // If we have null operands and no critical edges, optimize.
01478       if (!HasCriticalEdges && HasNull) {
01479         SmallPtrSet<Instruction *, 4> DependingInstructions;
01480         SmallPtrSet<const BasicBlock *, 4> Visited;
01481 
01482         // Check that there is nothing that cares about the reference
01483         // count between the call and the phi.
01484         switch (Class) {
01485         case IC_Retain:
01486         case IC_RetainBlock:
01487           // These can always be moved up.
01488           break;
01489         case IC_Release:
01490           // These can't be moved across things that care about the retain
01491           // count.
01492           FindDependencies(NeedsPositiveRetainCount, Arg,
01493                            Inst->getParent(), Inst,
01494                            DependingInstructions, Visited, PA);
01495           break;
01496         case IC_Autorelease:
01497           // These can't be moved across autorelease pool scope boundaries.
01498           FindDependencies(AutoreleasePoolBoundary, Arg,
01499                            Inst->getParent(), Inst,
01500                            DependingInstructions, Visited, PA);
01501           break;
01502         case IC_RetainRV:
01503         case IC_AutoreleaseRV:
01504           // Don't move these; the RV optimization depends on the autoreleaseRV
01505           // being tail called, and the retainRV being immediately after a call
01506           // (which might still happen if we get lucky with codegen layout, but
01507           // it's not worth taking the chance).
01508           continue;
01509         default:
01510           llvm_unreachable("Invalid dependence flavor");
01511         }
01512 
01513         if (DependingInstructions.size() == 1 &&
01514             *DependingInstructions.begin() == PN) {
01515           Changed = true;
01516           ++NumPartialNoops;
01517           // Clone the call into each predecessor that has a non-null value.
01518           CallInst *CInst = cast<CallInst>(Inst);
01519           Type *ParamTy = CInst->getArgOperand(0)->getType();
01520           for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
01521             Value *Incoming =
01522               StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
01523             if (!IsNullOrUndef(Incoming)) {
01524               CallInst *Clone = cast<CallInst>(CInst->clone());
01525               Value *Op = PN->getIncomingValue(i);
01526               Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
01527               if (Op->getType() != ParamTy)
01528                 Op = new BitCastInst(Op, ParamTy, "", InsertPos);
01529               Clone->setArgOperand(0, Op);
01530               Clone->insertBefore(InsertPos);
01531 
01532               DEBUG(dbgs() << "Cloning "
01533                            << *CInst << "\n"
01534                            "And inserting clone at " << *InsertPos << "\n");
01535               Worklist.push_back(std::make_pair(Clone, Incoming));
01536             }
01537           }
01538           // Erase the original call.
01539           DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
01540           EraseInstruction(CInst);
01541           continue;
01542         }
01543       }
01544     } while (!Worklist.empty());
01545   }
01546 }
01547 
01548 /// If we have a top down pointer in the S_Use state, make sure that there are
01549 /// no CFG hazards by checking the states of various bottom up pointers.
01550 static void CheckForUseCFGHazard(const Sequence SuccSSeq,
01551                                  const bool SuccSRRIKnownSafe,
01552                                  PtrState &S,
01553                                  bool &SomeSuccHasSame,
01554                                  bool &AllSuccsHaveSame,
01555                                  bool &NotAllSeqEqualButKnownSafe,
01556                                  bool &ShouldContinue) {
01557   switch (SuccSSeq) {
01558   case S_CanRelease: {
01559     if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
01560       S.ClearSequenceProgress();
01561       break;
01562     }
01563     S.SetCFGHazardAfflicted(true);
01564     ShouldContinue = true;
01565     break;
01566   }
01567   case S_Use:
01568     SomeSuccHasSame = true;
01569     break;
01570   case S_Stop:
01571   case S_Release:
01572   case S_MovableRelease:
01573     if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
01574       AllSuccsHaveSame = false;
01575     else
01576       NotAllSeqEqualButKnownSafe = true;
01577     break;
01578   case S_Retain:
01579     llvm_unreachable("bottom-up pointer in retain state!");
01580   case S_None:
01581     llvm_unreachable("This should have been handled earlier.");
01582   }
01583 }
01584 
01585 /// If we have a Top Down pointer in the S_CanRelease state, make sure that
01586 /// there are no CFG hazards by checking the states of various bottom up
01587 /// pointers.
01588 static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
01589                                         const bool SuccSRRIKnownSafe,
01590                                         PtrState &S,
01591                                         bool &SomeSuccHasSame,
01592                                         bool &AllSuccsHaveSame,
01593                                         bool &NotAllSeqEqualButKnownSafe) {
01594   switch (SuccSSeq) {
01595   case S_CanRelease:
01596     SomeSuccHasSame = true;
01597     break;
01598   case S_Stop:
01599   case S_Release:
01600   case S_MovableRelease:
01601   case S_Use:
01602     if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
01603       AllSuccsHaveSame = false;
01604     else
01605       NotAllSeqEqualButKnownSafe = true;
01606     break;
01607   case S_Retain:
01608     llvm_unreachable("bottom-up pointer in retain state!");
01609   case S_None:
01610     llvm_unreachable("This should have been handled earlier.");
01611   }
01612 }
01613 
01614 /// Check for critical edges, loop boundaries, irreducible control flow, or
01615 /// other CFG structures where moving code across the edge would result in it
01616 /// being executed more often.
01617 void
01618 ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
01619                                DenseMap<const BasicBlock *, BBState> &BBStates,
01620                                BBState &MyStates) const {
01621   // If any top-down local-use or possible-decrement has a successor which is
01622   // earlier in the sequence, forget it.
01623   for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
01624          E = MyStates.top_down_ptr_end(); I != E; ++I) {
01625     PtrState &S = I->second;
01626     const Sequence Seq = I->second.GetSeq();
01627 
01628     // We only care about S_Retain, S_CanRelease, and S_Use.
01629     if (Seq == S_None)
01630       continue;
01631 
01632     // Make sure that if extra top-down states are added in the future, this
01633     // code is updated to handle them.
01634     assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
01635            "Unknown top down sequence state.");
01636 
01637     const Value *Arg = I->first;
01638     const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
01639     bool SomeSuccHasSame = false;
01640     bool AllSuccsHaveSame = true;
01641     bool NotAllSeqEqualButKnownSafe = false;
01642 
01643     succ_const_iterator SI(TI), SE(TI, false);
01644 
01645     for (; SI != SE; ++SI) {
01646       // If VisitBottomUp has pointer information for this successor, take
01647       // what we know about it.
01648       const DenseMap<const BasicBlock *, BBState>::iterator BBI =
01649         BBStates.find(*SI);
01650       assert(BBI != BBStates.end());
01651       const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
01652       const Sequence SuccSSeq = SuccS.GetSeq();
01653 
01654       // If the pointer is in the S_None state bottom up, clear the
01655       // sequence progress, since the bottom-up sequence has finished,
01656       // suggesting a mismatch between retains and releases. This is true
01657       // for all three cases that we are handling here: S_Retain, S_Use,
01658       // and S_CanRelease.
01659       if (SuccSSeq == S_None) {
01660         S.ClearSequenceProgress();
01661         continue;
01662       }
01663 
01664       // If we have S_Use or S_CanRelease, perform the corresponding CFG
01665       // hazard checks.
01666       const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
01667 
01668       // *NOTE* We do not use Seq from above here since we are allowing for
01669       // S.GetSeq() to change while we are visiting basic blocks.
01670       switch(S.GetSeq()) {
01671       case S_Use: {
01672         bool ShouldContinue = false;
01673         CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
01674                              AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
01675                              ShouldContinue);
01676         if (ShouldContinue)
01677           continue;
01678         break;
01679       }
01680       case S_CanRelease: {
01681         CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
01682                                     SomeSuccHasSame, AllSuccsHaveSame,
01683                                     NotAllSeqEqualButKnownSafe);
01684         break;
01685       }
01686       case S_Retain:
01687       case S_None:
01688       case S_Stop:
01689       case S_Release:
01690       case S_MovableRelease:
01691         break;
01692       }
01693     }
01694 
01695     // If the state at the other end of any of the successor edges
01696     // matches the current state, require all edges to match. This
01697     // guards against loops in the middle of a sequence.
01698     if (SomeSuccHasSame && !AllSuccsHaveSame) {
01699       S.ClearSequenceProgress();
01700     } else if (NotAllSeqEqualButKnownSafe) {
01701       // If we would have cleared the state were it not for the fact that we are
01702       // known safe, stop code motion. This is because whether or not it is safe to
01703       // remove RR pairs via KnownSafe is an orthogonal concept to whether we
01704       // are allowed to perform code motion.
01705       S.SetCFGHazardAfflicted(true);
01706     }
01707   }
01708 }
01709 
01710 bool
01711 ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
01712                                      BasicBlock *BB,
01713                                      MapVector<Value *, RRInfo> &Retains,
01714                                      BBState &MyStates) {
01715   bool NestingDetected = false;
01716   InstructionClass Class = GetInstructionClass(Inst);
01717   const Value *Arg = nullptr;
01718 
01719   DEBUG(dbgs() << "Class: " << Class << "\n");
01720 
01721   switch (Class) {
01722   case IC_Release: {
01723     Arg = GetObjCArg(Inst);
01724 
01725     PtrState &S = MyStates.getPtrBottomUpState(Arg);
01726 
01727     // Check whether we see two releases in a row on the same pointer. If so,
01728     // make a note, and we'll circle back to revisit it after we've
01729     // hopefully eliminated the second release, which may allow us to
01730     // eliminate the first release too.
01731     // Theoretically we could implement removal of nested retain+release
01732     // pairs by making PtrState hold a stack of states, but this is
01733     // simple and avoids adding overhead for the non-nested case.
01734     if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
01735       DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
01736       NestingDetected = true;
01737     }
01738 
01739     MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
01740     Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
01741     ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
01742     S.ResetSequenceProgress(NewSeq);
01743     S.SetReleaseMetadata(ReleaseMetadata);
01744     S.SetKnownSafe(S.HasKnownPositiveRefCount());
01745     S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
01746     S.InsertCall(Inst);
01747     S.SetKnownPositiveRefCount();
01748     break;
01749   }
01750   case IC_RetainBlock:
01751     // In OptimizeIndividualCalls, we have strength reduced all optimizable
01752     // objc_retainBlocks to objc_retains. Thus at this point any
01753     // objc_retainBlocks that we see are not optimizable.
01754     break;
01755   case IC_Retain:
01756   case IC_RetainRV: {
01757     Arg = GetObjCArg(Inst);
01758 
01759     PtrState &S = MyStates.getPtrBottomUpState(Arg);
01760     S.SetKnownPositiveRefCount();
01761 
01762     Sequence OldSeq = S.GetSeq();
01763     switch (OldSeq) {
01764     case S_Stop:
01765     case S_Release:
01766     case S_MovableRelease:
01767     case S_Use:
01768       // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
01769       // imprecise release, clear our reverse insertion points.
01770       if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
01771         S.ClearReverseInsertPts();
01772       // FALL THROUGH
01773     case S_CanRelease:
01774       // Don't do retain+release tracking for IC_RetainRV, because it's
01775       // better to let it remain as the first instruction after a call.
01776       if (Class != IC_RetainRV)
01777         Retains[Inst] = S.GetRRInfo();
01778       S.ClearSequenceProgress();
01779       break;
01780     case S_None:
01781       break;
01782     case S_Retain:
01783       llvm_unreachable("bottom-up pointer in retain state!");
01784     }
01785     ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
01786     // A retain moving bottom up can be a use.
01787     break;
01788   }
01789   case IC_AutoreleasepoolPop:
01790     // Conservatively, clear MyStates for all known pointers.
01791     MyStates.clearBottomUpPointers();
01792     return NestingDetected;
01793   case IC_AutoreleasepoolPush:
01794   case IC_None:
01795     // These are irrelevant.
01796     return NestingDetected;
01797   case IC_User:
01798     // If we have a store into an alloca of a pointer we are tracking, the
01799     // pointer has multiple owners, implying that we must be more conservative.
01800     //
01801     // This comes up in the context of a pointer being ``KnownSafe''. In the
01802     // presence of a block being initialized, the frontend will emit the
01803     // objc_retain on the original pointer and the release on the pointer loaded
01804     // from the alloca. The optimizer will, through the provenance analysis,
01805     // realize that the two are related, but since we only require KnownSafe in
01806     // one direction, it will match the inner retain on the original pointer with
01807     // the guard release on the original pointer. This is fixed by ensuring that
01808     // in the presence of allocas we only unconditionally remove pointers if
01809     // both our retain and our release are KnownSafe.
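          // Roughly: a store of a tracked pointer into an alloca-backed slot,
          // e.g.
          //   store i8* %obj, i8** %slot
          // marks %obj as multi-owned via MultiOwnersSet, and the later
          // pairing logic then requires KnownSafe in both directions before
          // removing its retain/release pairs.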
01810     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
01811       if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
01812         BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
01813           StripPointerCastsAndObjCCalls(SI->getValueOperand()));
01814         if (I != MyStates.bottom_up_ptr_end())
01815           MultiOwnersSet.insert(I->first);
01816       }
01817     }
01818     break;
01819   default:
01820     break;
01821   }
01822 
01823   // Consider any other possible effects of this instruction on each
01824   // pointer being tracked.
01825   for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
01826        ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
01827     const Value *Ptr = MI->first;
01828     if (Ptr == Arg)
01829       continue; // Handled above.
01830     PtrState &S = MI->second;
01831     Sequence Seq = S.GetSeq();
01832 
01833     // Check for possible releases.
01834     if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
01835       DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
01836             << "\n");
01837       S.ClearKnownPositiveRefCount();
01838       switch (Seq) {
01839       case S_Use:
01840         S.SetSeq(S_CanRelease);
01841         ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
01842         continue;
01843       case S_CanRelease:
01844       case S_Release:
01845       case S_MovableRelease:
01846       case S_Stop:
01847       case S_None:
01848         break;
01849       case S_Retain:
01850         llvm_unreachable("bottom-up pointer in retain state!");
01851       }
01852     }
01853 
01854     // Check for possible direct uses.
01855     switch (Seq) {
01856     case S_Release:
01857     case S_MovableRelease:
01858       if (CanUse(Inst, Ptr, PA, Class)) {
01859         DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
01860               << "\n");
01861         assert(!S.HasReverseInsertPts());
01862         // If this is an invoke instruction, we're scanning it as part of
01863         // one of its successor blocks, since we can't insert code after it
01864         // in its own block, and we don't want to split critical edges.
01865         if (isa<InvokeInst>(Inst))
01866           S.InsertReverseInsertPt(BB->getFirstInsertionPt());
01867         else
01868           S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
01869         S.SetSeq(S_Use);
01870         ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
01871       } else if (Seq == S_Release && IsUser(Class)) {
01872         DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
01873               << "\n");
01874         // Non-movable releases depend on any possible objc pointer use.
01875         S.SetSeq(S_Stop);
01876         ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
01877         assert(!S.HasReverseInsertPts());
01878         // As above; handle invoke specially.
01879         if (isa<InvokeInst>(Inst))
01880           S.InsertReverseInsertPt(BB->getFirstInsertionPt());
01881         else
01882           S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
01883       }
01884       break;
01885     case S_Stop:
01886       if (CanUse(Inst, Ptr, PA, Class)) {
01887         DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
01888               << "\n");
01889         S.SetSeq(S_Use);
01890         ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
01891       }
01892       break;
01893     case S_CanRelease:
01894     case S_Use:
01895     case S_None:
01896       break;
01897     case S_Retain:
01898       llvm_unreachable("bottom-up pointer in retain state!");
01899     }
01900   }
01901 
01902   return NestingDetected;
01903 }
01904 
01905 bool
01906 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
01907                           DenseMap<const BasicBlock *, BBState> &BBStates,
01908                           MapVector<Value *, RRInfo> &Retains) {
01909 
01910   DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
01911 
01912   bool NestingDetected = false;
01913   BBState &MyStates = BBStates[BB];
01914 
01915   // Merge the states from each successor to compute the initial state
01916   // for the current block.
01917   BBState::edge_iterator SI(MyStates.succ_begin()),
01918                          SE(MyStates.succ_end());
01919   if (SI != SE) {
01920     const BasicBlock *Succ = *SI;
01921     DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
01922     assert(I != BBStates.end());
01923     MyStates.InitFromSucc(I->second);
01924     ++SI;
01925     for (; SI != SE; ++SI) {
01926       Succ = *SI;
01927       I = BBStates.find(Succ);
01928       assert(I != BBStates.end());
01929       MyStates.MergeSucc(I->second);
01930     }
01931   }
01932 
01933   // If ARC Annotations are enabled, output the current state of pointers at the
01934   // bottom of the basic block.
01935   ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
01936 
01937   // Visit all the instructions, bottom-up.
01938   for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
01939     Instruction *Inst = std::prev(I);
01940 
01941     // Invoke instructions are visited as part of their successors (below).
01942     if (isa<InvokeInst>(Inst))
01943       continue;
01944 
01945     DEBUG(dbgs() << "Visiting " << *Inst << "\n");
01946 
01947     NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
01948   }
01949 
01950   // If there's a predecessor with an invoke, visit the invoke as if it were
01951   // part of this block, since we can't insert code after an invoke in its own
01952   // block, and we don't want to split critical edges.
01953   for (BBState::edge_iterator PI(MyStates.pred_begin()),
01954        PE(MyStates.pred_end()); PI != PE; ++PI) {
01955     BasicBlock *Pred = *PI;
01956     if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
01957       NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
01958   }
01959 
01960   // If ARC Annotations are enabled, output the current state of pointers at the
01961   // top of the basic block.
01962   ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
01963 
01964   return NestingDetected;
01965 }
01966 
01967 bool
01968 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
01969                                     DenseMap<Value *, RRInfo> &Releases,
01970                                     BBState &MyStates) {
01971   bool NestingDetected = false;
01972   InstructionClass Class = GetInstructionClass(Inst);
01973   const Value *Arg = nullptr;
01974 
01975   switch (Class) {
01976   case IC_RetainBlock:
01977     // In OptimizeIndividualCalls, we have strength reduced all optimizable
01978     // objc_retainBlocks to objc_retains. Thus at this point any
01979     // objc_retainBlocks that we see are not optimizable.
01980     break;
01981   case IC_Retain:
01982   case IC_RetainRV: {
01983     Arg = GetObjCArg(Inst);
01984 
01985     PtrState &S = MyStates.getPtrTopDownState(Arg);
01986 
01987     // Don't do retain+release tracking for IC_RetainRV, because it's
01988     // better to let it remain as the first instruction after a call.
01989     if (Class != IC_RetainRV) {
01990       // Check whether we see two retains in a row on the same pointer. If so,
01991       // make a note, and we'll circle back to revisit it after we've
01992       // hopefully eliminated the second retain, which may allow us to
01993       // eliminate the first retain too.
01994       // Theoretically we could implement removal of nested retain+release
01995       // pairs by making PtrState hold a stack of states, but this is
01996       // simple and avoids adding overhead for the non-nested case.
01997       if (S.GetSeq() == S_Retain)
01998         NestingDetected = true;
01999 
02000       ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
02001       S.ResetSequenceProgress(S_Retain);
02002       S.SetKnownSafe(S.HasKnownPositiveRefCount());
02003       S.InsertCall(Inst);
02004     }
02005 
02006     S.SetKnownPositiveRefCount();
02007 
02008     // A retain can be a potential use; proceed to the generic checking
02009     // code below.
02010     break;
02011   }
02012   case IC_Release: {
02013     Arg = GetObjCArg(Inst);
02014 
02015     PtrState &S = MyStates.getPtrTopDownState(Arg);
02016     S.ClearKnownPositiveRefCount();
02017 
02018     Sequence OldSeq = S.GetSeq();
02019 
02020     MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
02021 
02022     switch (OldSeq) {
02023     case S_Retain:
02024     case S_CanRelease:
02025       if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
02026         S.ClearReverseInsertPts();
02027       // FALL THROUGH
02028     case S_Use:
02029       S.SetReleaseMetadata(ReleaseMetadata);
02030       S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
02031       Releases[Inst] = S.GetRRInfo();
02032       ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
02033       S.ClearSequenceProgress();
02034       break;
02035     case S_None:
02036       break;
02037     case S_Stop:
02038     case S_Release:
02039     case S_MovableRelease:
02040       llvm_unreachable("top-down pointer in release state!");
02041     }
02042     break;
02043   }
02044   case IC_AutoreleasepoolPop:
02045     // Conservatively, clear MyStates for all known pointers.
02046     MyStates.clearTopDownPointers();
02047     return NestingDetected;
02048   case IC_AutoreleasepoolPush:
02049   case IC_None:
02050     // These are irrelevant.
02051     return NestingDetected;
02052   default:
02053     break;
02054   }
02055 
02056   // Consider any other possible effects of this instruction on each
02057   // pointer being tracked.
02058   for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
02059        ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
02060     const Value *Ptr = MI->first;
02061     if (Ptr == Arg)
02062       continue; // Handled above.
02063     PtrState &S = MI->second;
02064     Sequence Seq = S.GetSeq();
02065 
02066     // Check for possible releases.
02067     if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
02068       DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
02069             << "\n");
02070       S.ClearKnownPositiveRefCount();
02071       switch (Seq) {
02072       case S_Retain:
02073         S.SetSeq(S_CanRelease);
02074         ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
02075         assert(!S.HasReverseInsertPts());
02076         S.InsertReverseInsertPt(Inst);
02077 
02078         // One call can't cause a transition from S_Retain to S_CanRelease
02079         // and from S_CanRelease to S_Use. If we've made the first transition,
02080         // we're done.
02081         continue;
02082       case S_Use:
02083       case S_CanRelease:
02084       case S_None:
02085         break;
02086       case S_Stop:
02087       case S_Release:
02088       case S_MovableRelease:
02089         llvm_unreachable("top-down pointer in release state!");
02090       }
02091     }
02092 
02093     // Check for possible direct uses.
02094     switch (Seq) {
02095     case S_CanRelease:
02096       if (CanUse(Inst, Ptr, PA, Class)) {
02097         DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
02098               << "\n");
02099         S.SetSeq(S_Use);
02100         ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
02101       }
02102       break;
02103     case S_Retain:
02104     case S_Use:
02105     case S_None:
02106       break;
02107     case S_Stop:
02108     case S_Release:
02109     case S_MovableRelease:
02110       llvm_unreachable("top-down pointer in release state!");
02111     }
02112   }
02113 
02114   return NestingDetected;
02115 }
02116 
02117 bool
02118 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
02119                          DenseMap<const BasicBlock *, BBState> &BBStates,
02120                          DenseMap<Value *, RRInfo> &Releases) {
02121   DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
02122   bool NestingDetected = false;
02123   BBState &MyStates = BBStates[BB];
02124 
02125   // Merge the states from each predecessor to compute the initial state
02126   // for the current block.
02127   BBState::edge_iterator PI(MyStates.pred_begin()),
02128                          PE(MyStates.pred_end());
02129   if (PI != PE) {
02130     const BasicBlock *Pred = *PI;
02131     DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
02132     assert(I != BBStates.end());
02133     MyStates.InitFromPred(I->second);
02134     ++PI;
02135     for (; PI != PE; ++PI) {
02136       Pred = *PI;
02137       I = BBStates.find(Pred);
02138       assert(I != BBStates.end());
02139       MyStates.MergePred(I->second);
02140     }
02141   }
02142 
02143   // If ARC Annotations are enabled, output the current state of pointers at the
02144   // top of the basic block.
02145   ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
02146 
02147   // Visit all the instructions, top-down.
02148   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
02149     Instruction *Inst = I;
02150 
02151     DEBUG(dbgs() << "Visiting " << *Inst << "\n");
02152 
02153     NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
02154   }
02155 
02156   // If ARC Annotations are enabled, output the current state of pointers at the
02157   // bottom of the basic block.
02158   ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
02159 
02160 #ifdef ARC_ANNOTATIONS
02161   if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
02162 #endif
02163   CheckForCFGHazards(BB, BBStates, MyStates);
02164   return NestingDetected;
02165 }
02166 
02167 static void
02168 ComputePostOrders(Function &F,
02169                   SmallVectorImpl<BasicBlock *> &PostOrder,
02170                   SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
02171                   unsigned NoObjCARCExceptionsMDKind,
02172                   DenseMap<const BasicBlock *, BBState> &BBStates) {
02173   /// The visited set, for doing DFS walks.
02174   SmallPtrSet<BasicBlock *, 16> Visited;
02175 
02176   // Do DFS, computing the PostOrder.
02177   SmallPtrSet<BasicBlock *, 16> OnStack;
02178   SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
02179 
02180   // Functions always have exactly one entry block, and we don't have
02181   // any other block that we treat like an entry block.
02182   BasicBlock *EntryBB = &F.getEntryBlock();
02183   BBState &MyStates = BBStates[EntryBB];
02184   MyStates.SetAsEntry();
02185   TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
02186   SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
02187   Visited.insert(EntryBB);
02188   OnStack.insert(EntryBB);
02189   do {
02190   dfs_next_succ:
02191     BasicBlock *CurrBB = SuccStack.back().first;
02192     TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
02193     succ_iterator SE(TI, false);
02194 
02195     while (SuccStack.back().second != SE) {
02196       BasicBlock *SuccBB = *SuccStack.back().second++;
02197       if (Visited.insert(SuccBB)) {
02198         TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
02199         SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
02200         BBStates[CurrBB].addSucc(SuccBB);
02201         BBState &SuccStates = BBStates[SuccBB];
02202         SuccStates.addPred(CurrBB);
02203         OnStack.insert(SuccBB);
02204         goto dfs_next_succ;
02205       }
02206 
02207       if (!OnStack.count(SuccBB)) {
02208         BBStates[CurrBB].addSucc(SuccBB);
02209         BBStates[SuccBB].addPred(CurrBB);
02210       }
02211     }
02212     OnStack.erase(CurrBB);
02213     PostOrder.push_back(CurrBB);
02214     SuccStack.pop_back();
02215   } while (!SuccStack.empty());
02216 
02217   Visited.clear();
02218 
02219   // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
02220   // Functions may have many exits, and there are also blocks which we treat
02221   // as exits due to ignored edges.
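        // (Back edges to blocks still on the DFS stack above are not recorded
        // as pred/succ links, so a block with no recorded successors can be
        // treated as an exit here even though it has successors in the CFG.)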
02222   SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
02223   for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
02224     BasicBlock *ExitBB = I;
02225     BBState &MyStates = BBStates[ExitBB];
02226     if (!MyStates.isExit())
02227       continue;
02228 
02229     MyStates.SetAsExit();
02230 
02231     PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
02232     Visited.insert(ExitBB);
02233     while (!PredStack.empty()) {
02234     reverse_dfs_next_succ:
02235       BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
02236       while (PredStack.back().second != PE) {
02237         BasicBlock *BB = *PredStack.back().second++;
02238         if (Visited.insert(BB)) {
02239           PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
02240           goto reverse_dfs_next_succ;
02241         }
02242       }
02243       ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
02244     }
02245   }
02246 }
02247 
02248 // Visit the function both top-down and bottom-up.
02249 bool
02250 ObjCARCOpt::Visit(Function &F,
02251                   DenseMap<const BasicBlock *, BBState> &BBStates,
02252                   MapVector<Value *, RRInfo> &Retains,
02253                   DenseMap<Value *, RRInfo> &Releases) {
02254 
02255   // Use reverse-postorder traversals, because we magically know that loops
02256   // will be well behaved, i.e. they won't repeatedly call retain on a single
02257   // pointer without doing a release. We can't use the ReversePostOrderTraversal
02258   // class here because we want the reverse-CFG postorder to consider each
02259   // function exit point, and we want to ignore selected cycle edges.
02260   SmallVector<BasicBlock *, 16> PostOrder;
02261   SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
02262   ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
02263                     NoObjCARCExceptionsMDKind,
02264                     BBStates);
02265 
02266   // Use reverse-postorder on the reverse CFG for bottom-up.
02267   bool BottomUpNestingDetected = false;
02268   for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
02269        ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
02270        I != E; ++I)
02271     BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
02272 
02273   // Use reverse-postorder for top-down.
02274   bool TopDownNestingDetected = false;
02275   for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
02276        PostOrder.rbegin(), E = PostOrder.rend();
02277        I != E; ++I)
02278     TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
02279 
02280   return TopDownNestingDetected && BottomUpNestingDetected;
02281 }
02282 
02283 /// Move the calls in RetainsToMove and ReleasesToMove.
02284 void ObjCARCOpt::MoveCalls(Value *Arg,
02285                            RRInfo &RetainsToMove,
02286                            RRInfo &ReleasesToMove,
02287                            MapVector<Value *, RRInfo> &Retains,
02288                            DenseMap<Value *, RRInfo> &Releases,
02289                            SmallVectorImpl<Instruction *> &DeadInsts,
02290                            Module *M) {
02291   Type *ArgTy = Arg->getType();
02292   Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
02293 
02294   DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
02295 
02296   // Insert the new retain and release calls.
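        // Note the crossover: the new retains are created at the reverse
        // insertion points collected for the releases being moved, and the new
        // releases at the points collected for the retains, since that is
        // where each call ends up after motion.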
02297   for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
02298     Value *MyArg = ArgTy == ParamTy ? Arg :
02299                    new BitCastInst(Arg, ParamTy, "", InsertPt);
02300     Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
02301     CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
02302     Call->setDoesNotThrow();
02303     Call->setTailCall();
02304 
02305     DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
02306                     "At insertion point: " << *InsertPt << "\n");
02307   }
02308   for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
02309     Value *MyArg = ArgTy == ParamTy ? Arg :
02310                    new BitCastInst(Arg, ParamTy, "", InsertPt);
02311     Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
02312     CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
02313     // Attach a clang.imprecise_release metadata tag, if appropriate.
02314     if (MDNode *M = ReleasesToMove.ReleaseMetadata)
02315       Call->setMetadata(ImpreciseReleaseMDKind, M);
02316     Call->setDoesNotThrow();
02317     if (ReleasesToMove.IsTailCallRelease)
02318       Call->setTailCall();
02319 
02320     DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
02321                     "At insertion point: " << *InsertPt << "\n");
02322   }
02323 
02324   // Delete the original retain and release calls.
02325   for (Instruction *OrigRetain : RetainsToMove.Calls) {
02326     Retains.blot(OrigRetain);
02327     DeadInsts.push_back(OrigRetain);
02328     DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
02329   }
02330   for (Instruction *OrigRelease : ReleasesToMove.Calls) {
02331     Releases.erase(OrigRelease);
02332     DeadInsts.push_back(OrigRelease);
02333     DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
02334   }
02335 
02336 }
02337 
02338 bool
02339 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
02340                                     &BBStates,
02341                                   MapVector<Value *, RRInfo> &Retains,
02342                                   DenseMap<Value *, RRInfo> &Releases,
02343                                   Module *M,
02344                                   SmallVectorImpl<Instruction *> &NewRetains,
02345                                   SmallVectorImpl<Instruction *> &NewReleases,
02346                                   SmallVectorImpl<Instruction *> &DeadInsts,
02347                                   RRInfo &RetainsToMove,
02348                                   RRInfo &ReleasesToMove,
02349                                   Value *Arg,
02350                                   bool KnownSafe,
02351                                   bool &AnyPairsCompletelyEliminated) {
02352   // If a pair happens in a region where it is known that the reference count
02353   // is already incremented, we can similarly ignore possible decrements unless
02354   // we are dealing with a retainable object with multiple provenance sources.
02355   bool KnownSafeTD = true, KnownSafeBU = true;
02356   bool MultipleOwners = false;
02357   bool CFGHazardAfflicted = false;
02358 
02359   // Connect the dots between the top-down-collected RetainsToMove and
02360   // bottom-up-collected ReleasesToMove to form sets of related calls.
02361   // This is an iterative process so that we connect multiple releases
02362   // to multiple retains if needed.
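        // Roughly, OldDelta/OldCount accumulate path-count-weighted totals for
        // the original calls, while NewDelta/NewCount do the same for the
        // insertion points that would be created; a nonzero delta at the end
        // means the rewrite would unbalance retains and releases along some
        // path, so we conservatively bail out.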
02363   unsigned OldDelta = 0;
02364   unsigned NewDelta = 0;
02365   unsigned OldCount = 0;
02366   unsigned NewCount = 0;
02367   bool FirstRelease = true;
02368   for (;;) {
02369     for (SmallVectorImpl<Instruction *>::const_iterator
02370            NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
02371       Instruction *NewRetain = *NI;
02372       MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
02373       assert(It != Retains.end());
02374       const RRInfo &NewRetainRRI = It->second;
02375       KnownSafeTD &= NewRetainRRI.KnownSafe;
02376       MultipleOwners =
02377         MultipleOwners || MultiOwnersSet.count(GetObjCArg(NewRetain));
02378       for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
02379         DenseMap<Value *, RRInfo>::const_iterator Jt =
02380           Releases.find(NewRetainRelease);
02381         if (Jt == Releases.end())
02382           return false;
02383         const RRInfo &NewRetainReleaseRRI = Jt->second;
02384 
02385         // If the release does not have a reference to the retain as well,
02386         // something happened which is unaccounted for. Do not do anything.
02387         //
02388         // This can happen if we catch an additive overflow during path count
02389         // merging.
02390         if (!NewRetainReleaseRRI.Calls.count(NewRetain))
02391           return false;
02392 
02393         if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
02394 
02395           // If we overflow when we compute the path count, don't remove/move
02396           // anything.
02397           const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
02398           unsigned PathCount = BBState::OverflowOccurredValue;
02399           if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
02400             return false;
02401           assert(PathCount != BBState::OverflowOccurredValue &&
02402                  "PathCount at this point can not be "
02403                  "OverflowOccurredValue.");
02404           OldDelta -= PathCount;
02405 
02406           // Merge the ReleaseMetadata and IsTailCallRelease values.
02407           if (FirstRelease) {
02408             ReleasesToMove.ReleaseMetadata =
02409               NewRetainReleaseRRI.ReleaseMetadata;
02410             ReleasesToMove.IsTailCallRelease =
02411               NewRetainReleaseRRI.IsTailCallRelease;
02412             FirstRelease = false;
02413           } else {
02414             if (ReleasesToMove.ReleaseMetadata !=
02415                 NewRetainReleaseRRI.ReleaseMetadata)
02416               ReleasesToMove.ReleaseMetadata = nullptr;
02417             if (ReleasesToMove.IsTailCallRelease !=
02418                 NewRetainReleaseRRI.IsTailCallRelease)
02419               ReleasesToMove.IsTailCallRelease = false;
02420           }
02421 
02422           // Collect the optimal insertion points.
02423           if (!KnownSafe)
02424             for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
02425               if (ReleasesToMove.ReverseInsertPts.insert(RIP)) {
02426                 // If we overflow when we compute the path count, don't
02427                 // remove/move anything.
02428                 const BBState &RIPBBState = BBStates[RIP->getParent()];
02429                 PathCount = BBState::OverflowOccurredValue;
02430                 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
02431                   return false;
02432                 assert(PathCount != BBState::OverflowOccurredValue &&
02433                        "PathCount at this point can not be "
02434                        "OverflowOccurredValue.");
02435                 NewDelta -= PathCount;
02436               }
02437             }
02438           NewReleases.push_back(NewRetainRelease);
02439         }
02440       }
02441     }
02442     NewRetains.clear();
02443     if (NewReleases.empty()) break;
02444 
02445     // Back the other way.
02446     for (SmallVectorImpl<Instruction *>::const_iterator
02447            NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
02448       Instruction *NewRelease = *NI;
02449       DenseMap<Value *, RRInfo>::const_iterator It =
02450         Releases.find(NewRelease);
02451       assert(It != Releases.end());
02452       const RRInfo &NewReleaseRRI = It->second;
02453       KnownSafeBU &= NewReleaseRRI.KnownSafe;
02454       CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
02455       for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
02456         MapVector<Value *, RRInfo>::const_iterator Jt =
02457           Retains.find(NewReleaseRetain);
02458         if (Jt == Retains.end())
02459           return false;
02460         const RRInfo &NewReleaseRetainRRI = Jt->second;
02461 
02462         // If the retain does not have a reference to the release as well,
02463         // something happened which is unaccounted for. Do not do anything.
02464         //
02465         // This can happen if we catch an additive overflow during path count
02466         // merging.
02467         if (!NewReleaseRetainRRI.Calls.count(NewRelease))
02468           return false;
02469 
02470         if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
02471           // If we overflow when we compute the path count, don't remove/move
02472           // anything.
02473           const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
02474           unsigned PathCount = BBState::OverflowOccurredValue;
02475           if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
02476             return false;
02477           assert(PathCount != BBState::OverflowOccurredValue &&
02478                  "PathCount at this point can not be "
02479                  "OverflowOccurredValue.");
02480           OldDelta += PathCount;
02481           OldCount += PathCount;
02482 
02483           // Collect the optimal insertion points.
02484           if (!KnownSafe)
02485             for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
02486               if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
02487                 // If we overflow when we compute the path count, don't
02488                 // remove/move anything.
02489                 const BBState &RIPBBState = BBStates[RIP->getParent()];
02490 
02491                 PathCount = BBState::OverflowOccurredValue;
02492                 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
02493                   return false;
02494                 assert(PathCount != BBState::OverflowOccurredValue &&
02495                        "PathCount at this point can not be "
02496                        "OverflowOccurredValue.");
02497                 NewDelta += PathCount;
02498                 NewCount += PathCount;
02499               }
02500             }
02501           NewRetains.push_back(NewReleaseRetain);
02502         }
02503       }
02504     }
02505     NewReleases.clear();
02506     if (NewRetains.empty()) break;
02507   }
02508 
02509   // If the pointer is known to be incremented in one direction and we do not
02510   // have MultipleOwners, we can safely remove the retain/release pairs.
02511   // Otherwise the pointer needs to be known safe in both directions.
02512   bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
02513     ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
02514   if (UnconditionallySafe) {
02515     RetainsToMove.ReverseInsertPts.clear();
02516     ReleasesToMove.ReverseInsertPts.clear();
02517     NewCount = 0;
02518   } else {
02519     // Determine whether the new insertion points we computed preserve the
02520     // balance of retain and release calls through the program.
02521     // TODO: If the fully aggressive solution isn't valid, try to find a
02522     // less aggressive solution which is.
02523     if (NewDelta != 0)
02524       return false;
02525 
02526     // At this point, we are not going to remove any RR pairs, but we are still
02527     // able to move RR pairs. If one of our pointers is afflicted with
02528     // CFGHazards, we cannot perform such code motion so exit early.
02529     const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
02530       ReleasesToMove.ReverseInsertPts.size();
02531     if (CFGHazardAfflicted && WillPerformCodeMotion)
02532       return false;
02533   }
02534 
02535   // Determine whether the original retain and release call points are balanced
02536   // through the program. If not, conservatively don't touch
02537   // them.
02538   // TODO: It's theoretically possible to do code motion in this case, as
02539   // long as the existing imbalances are maintained.
02540   if (OldDelta != 0)
02541     return false;
02542 
02543 #ifdef ARC_ANNOTATIONS
02544   // Do not move calls if ARC annotations are requested.
02545   if (EnableARCAnnotations)
02546     return false;
02547 #endif // ARC_ANNOTATIONS
02548 
02549   Changed = true;
02550   assert(OldCount != 0 && "Unreachable code?");
02551   NumRRs += OldCount - NewCount;
02552   // Set to true if we completely removed any RR pairs.
02553   AnyPairsCompletelyEliminated = NewCount == 0;
02554 
02555   // We can move calls!
02556   return true;
02557 }
02558 
02559 /// Identify pairings between the retains and releases, and delete and/or move
02560 /// them.
02561 bool
02562 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
02563                                    &BBStates,
02564                                  MapVector<Value *, RRInfo> &Retains,
02565                                  DenseMap<Value *, RRInfo> &Releases,
02566                                  Module *M) {
02567   DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
02568 
02569   bool AnyPairsCompletelyEliminated = false;
02570   RRInfo RetainsToMove;
02571   RRInfo ReleasesToMove;
02572   SmallVector<Instruction *, 4> NewRetains;
02573   SmallVector<Instruction *, 4> NewReleases;
02574   SmallVector<Instruction *, 8> DeadInsts;
02575 
02576   // Visit each retain.
02577   for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
02578        E = Retains.end(); I != E; ++I) {
02579     Value *V = I->first;
02580     if (!V) continue; // blotted
02581 
02582     Instruction *Retain = cast<Instruction>(V);
02583 
02584     DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
02585 
02586     Value *Arg = GetObjCArg(Retain);
02587 
02588     // If the object being released is in static or stack storage, we know it's
02589     // not being managed by ObjC reference counting, so we can delete pairs
02590     // regardless of what possible decrements or uses lie between them.
02591     bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
02592 
02593     // A constant pointer can't be pointing to an object on the heap. It may
02594     // be reference-counted, but it won't be deleted.
02595     if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
02596       if (const GlobalVariable *GV =
02597             dyn_cast<GlobalVariable>(
02598               StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
02599         if (GV->isConstant())
02600           KnownSafe = true;
02601 
02602     // Connect the dots between the top-down-collected RetainsToMove and
02603     // bottom-up-collected ReleasesToMove to form sets of related calls.
02604     NewRetains.push_back(Retain);
02605     bool PerformMoveCalls =
02606       ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
02607                             NewReleases, DeadInsts, RetainsToMove,
02608                             ReleasesToMove, Arg, KnownSafe,
02609                             AnyPairsCompletelyEliminated);
02610 
02611     if (PerformMoveCalls) {
02612       // Ok, everything checks out and we're all set. Let's move/delete some
02613       // code!
02614       MoveCalls(Arg, RetainsToMove, ReleasesToMove,
02615                 Retains, Releases, DeadInsts, M);
02616     }
02617 
02618     // Clean up state for next retain.
02619     NewReleases.clear();
02620     NewRetains.clear();
02621     RetainsToMove.clear();
02622     ReleasesToMove.clear();
02623   }
02624 
02625   // Now that we're done moving everything, we can delete the newly dead
02626   // instructions, as we no longer need them as insert points.
02627   while (!DeadInsts.empty())
02628     EraseInstruction(DeadInsts.pop_back_val());
02629 
02630   return AnyPairsCompletelyEliminated;
02631 }
02632 
02633 /// Weak pointer optimizations.
02634 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
02635   DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
02636 
02637   // First, do memdep-style redundant load elimination (RLE) and store-to-load
02638   // forwarding (S2L) optimizations. We can't use memdep itself because it uses
02639   // AliasAnalysis and we need to do provenance queries instead.
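  // Illustrative sketch (not part of the original source) of the two local
  // forwarding cases handled below, assuming the standard ARC weak runtime
  // entry points:
  //
  //   %a = call i8* @objc_loadWeak(i8** %w)
  //   %b = call i8* @objc_loadWeak(i8** %w)            ; RLE: %b can reuse %a
  //
  //   %c = call i8* @objc_storeWeak(i8** %w, i8* %obj)
  //   %d = call i8* @objc_loadWeak(i8** %w)            ; S2L: %d can reuse %obj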
02640   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
02641     Instruction *Inst = &*I++;
02642 
02643     DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
02644 
02645     InstructionClass Class = GetBasicInstructionClass(Inst);
02646     if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
02647       continue;
02648 
02649     // Delete objc_loadWeak calls with no users.
02650     if (Class == IC_LoadWeak && Inst->use_empty()) {
02651       Inst->eraseFromParent();
02652       continue;
02653     }
02654 
02655     // TODO: For now, just look for an earlier available version of this value
02656     // within the same block. Theoretically, we could do memdep-style non-local
02657     // analysis too, but that would require caching. A better approach would be
02658     // to use the technique that EarlyCSE uses.
02659     inst_iterator Current = std::prev(I);
02660     BasicBlock *CurrentBB = Current.getBasicBlockIterator();
02661     for (BasicBlock::iterator B = CurrentBB->begin(),
02662                               J = Current.getInstructionIterator();
02663          J != B; --J) {
02664       Instruction *EarlierInst = &*std::prev(J);
02665       InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
02666       switch (EarlierClass) {
02667       case IC_LoadWeak:
02668       case IC_LoadWeakRetained: {
02669         // If this is loading from the same pointer, replace this load's value
02670         // with that one.
02671         CallInst *Call = cast<CallInst>(Inst);
02672         CallInst *EarlierCall = cast<CallInst>(EarlierInst);
02673         Value *Arg = Call->getArgOperand(0);
02674         Value *EarlierArg = EarlierCall->getArgOperand(0);
02675         switch (PA.getAA()->alias(Arg, EarlierArg)) {
02676         case AliasAnalysis::MustAlias:
02677           Changed = true;
02678           // If the load has a builtin retain, insert a plain retain for it.
02679           if (Class == IC_LoadWeakRetained) {
02680             Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
02681             CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
02682             CI->setTailCall();
02683           }
02684           // Zap the fully redundant load.
02685           Call->replaceAllUsesWith(EarlierCall);
02686           Call->eraseFromParent();
02687           goto clobbered;
02688         case AliasAnalysis::MayAlias:
02689         case AliasAnalysis::PartialAlias:
02690           goto clobbered;
02691         case AliasAnalysis::NoAlias:
02692           break;
02693         }
02694         break;
02695       }
02696       case IC_StoreWeak:
02697       case IC_InitWeak: {
02698         // If this is storing to the same pointer and has the same size etc.,
02699         // replace this load's value with the stored value.
02700         CallInst *Call = cast<CallInst>(Inst);
02701         CallInst *EarlierCall = cast<CallInst>(EarlierInst);
02702         Value *Arg = Call->getArgOperand(0);
02703         Value *EarlierArg = EarlierCall->getArgOperand(0);
02704         switch (PA.getAA()->alias(Arg, EarlierArg)) {
02705         case AliasAnalysis::MustAlias:
02706           Changed = true;
02707           // If the load has a builtin retain, insert a plain retain for it.
02708           if (Class == IC_LoadWeakRetained) {
02709             Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
02710             CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
02711             CI->setTailCall();
02712           }
02713           // Forward the stored value and zap the fully redundant load.
02714           Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
02715           Call->eraseFromParent();
02716           goto clobbered;
02717         case AliasAnalysis::MayAlias:
02718         case AliasAnalysis::PartialAlias:
02719           goto clobbered;
02720         case AliasAnalysis::NoAlias:
02721           break;
02722         }
02723         break;
02724       }
02725       case IC_MoveWeak:
02726       case IC_CopyWeak:
02727         // TODO: Grab the copied value.
02728         goto clobbered;
02729       case IC_AutoreleasepoolPush:
02730       case IC_None:
02731       case IC_IntrinsicUser:
02732       case IC_User:
02733         // Weak pointers are only modified through the weak entry points
02734         // (and arbitrary calls, which could call the weak entry points).
02735         break;
02736       default:
02737         // Anything else could modify the weak pointer.
02738         goto clobbered;
02739       }
02740     }
02741   clobbered:;
02742   }
02743 
02744   // Then, for each destroyWeak with an alloca operand, check to see if
02745   // the alloca and all its users can be zapped.
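  // Illustrative sketch (not part of the original source): a __weak slot that
  // lives entirely in an alloca and is only touched by objc_initWeak,
  // objc_storeWeak, and objc_destroyWeak can be deleted along with all of
  // those calls, e.g.:
  //
  //   %w = alloca i8*
  //   %0 = call i8* @objc_initWeak(i8** %w, i8* %obj)
  //   call void @objc_destroyWeak(i8** %w)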
02746   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
02747     Instruction *Inst = &*I++;
02748     InstructionClass Class = GetBasicInstructionClass(Inst);
02749     if (Class != IC_DestroyWeak)
02750       continue;
02751 
02752     CallInst *Call = cast<CallInst>(Inst);
02753     Value *Arg = Call->getArgOperand(0);
02754     if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
02755       for (User *U : Alloca->users()) {
02756         const Instruction *UserInst = cast<Instruction>(U);
02757         switch (GetBasicInstructionClass(UserInst)) {
02758         case IC_InitWeak:
02759         case IC_StoreWeak:
02760         case IC_DestroyWeak:
02761           continue;
02762         default:
02763           goto done;
02764         }
02765       }
02766       Changed = true;
02767       for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
02768         CallInst *UserInst = cast<CallInst>(*UI++);
02769         switch (GetBasicInstructionClass(UserInst)) {
02770         case IC_InitWeak:
02771         case IC_StoreWeak:
02772           // These functions return their second argument.
02773           UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
02774           break;
02775         case IC_DestroyWeak:
02776           // No return value.
02777           break;
02778         default:
02779           llvm_unreachable("alloca really is used!");
02780         }
02781         UserInst->eraseFromParent();
02782       }
02783       Alloca->eraseFromParent();
02784     done:;
02785     }
02786   }
02787 }
02788 
02789 /// Identify program paths that execute sequences of retains and releases that
02790 /// can be eliminated.
02791 bool ObjCARCOpt::OptimizeSequences(Function &F) {
02792   // Releases, Retains - These are used to store the results of the main flow
02793   // analysis. These use Value* as the key instead of Instruction* so that the
02794   // map stays valid when we get around to rewriting code and calls get
02795   // replaced by arguments.
02796   DenseMap<Value *, RRInfo> Releases;
02797   MapVector<Value *, RRInfo> Retains;
02798 
02799   // This is used during the traversal of the function to track the
02800   // states for each identified object at each block.
02801   DenseMap<const BasicBlock *, BBState> BBStates;
02802 
02803   // Analyze the CFG of the function, and all instructions.
02804   bool NestingDetected = Visit(F, BBStates, Retains, Releases);
02805 
02806   // Transform.
02807   bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
02808                                                            Releases,
02809                                                            F.getParent());
02810 
02811   // Cleanup.
02812   MultiOwnersSet.clear();
02813 
02814   return AnyPairsCompletelyEliminated && NestingDetected;
02815 }
02816 
02817 /// Check whether there is an earlier call on which the Retain depends, with
02818 /// nothing in between the Retain and that call that can affect the reference
02819 /// count of their shared pointer argument. Note that Retain need not be in BB.
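// Illustrative sketch (not part of the original source); @producer is just a
// placeholder callee. This helper accepts the case where the retained pointer
// is the return value of the nearest preceding call that could change its
// reference count:
//
//   %call = call i8* @producer()
//   %0 = call i8* @objc_retain(i8* %call)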
02820 static bool
02821 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
02822                              SmallPtrSetImpl<Instruction *> &DepInsts,
02823                              SmallPtrSetImpl<const BasicBlock *> &Visited,
02824                              ProvenanceAnalysis &PA) {
02825   FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
02826                    DepInsts, Visited, PA);
02827   if (DepInsts.size() != 1)
02828     return false;
02829 
02830   CallInst *Call =
02831     dyn_cast_or_null<CallInst>(*DepInsts.begin());
02832 
02833   // Check that the pointer is the return value of the call.
02834   if (!Call || Arg != Call)
02835     return false;
02836 
02837   // Check that the call is a regular call.
02838   InstructionClass Class = GetBasicInstructionClass(Call);
02839   if (Class != IC_CallOrUser && Class != IC_Call)
02840     return false;
02841 
02842   return true;
02843 }
02844 
02845 /// Find a dependent retain that precedes the given autorelease for which there
02846 /// is nothing in between the two instructions that can affect the ref count of
02847 /// Arg.
02848 static CallInst *
02849 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
02850                                   Instruction *Autorelease,
02851                                   SmallPtrSetImpl<Instruction *> &DepInsts,
02852                                   SmallPtrSetImpl<const BasicBlock *> &Visited,
02853                                   ProvenanceAnalysis &PA) {
02854   FindDependencies(CanChangeRetainCount, Arg,
02855                    BB, Autorelease, DepInsts, Visited, PA);
02856   if (DepInsts.size() != 1)
02857     return nullptr;
02858 
02859   CallInst *Retain =
02860     dyn_cast_or_null<CallInst>(*DepInsts.begin());
02861 
02862   // Check that we found a retain with the same argument.
02863   if (!Retain ||
02864       !IsRetain(GetBasicInstructionClass(Retain)) ||
02865       GetObjCArg(Retain) != Arg) {
02866     return nullptr;
02867   }
02868 
02869   return Retain;
02870 }
02871 
02872 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
02873 /// no instructions dependent on Arg that need a positive ref count in between
02874 /// the autorelease and the ret.
02875 static CallInst *
02876 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
02877                                        ReturnInst *Ret,
02878                                        SmallPtrSetImpl<Instruction *> &DepInsts,
02879                                        SmallPtrSetImpl<const BasicBlock *> &V,
02880                                        ProvenanceAnalysis &PA) {
02881   FindDependencies(NeedsPositiveRetainCount, Arg,
02882                    BB, Ret, DepInsts, V, PA);
02883   if (DepInsts.size() != 1)
02884     return nullptr;
02885 
02886   CallInst *Autorelease =
02887     dyn_cast_or_null<CallInst>(*DepInsts.begin());
02888   if (!Autorelease)
02889     return nullptr;
02890   InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
02891   if (!IsAutorelease(AutoreleaseClass))
02892     return nullptr;
02893   if (GetObjCArg(Autorelease) != Arg)
02894     return nullptr;
02895 
02896   return Autorelease;
02897 }
02898 
02899 /// Look for this pattern:
02900 /// \code
02901 ///    %call = call i8* @something(...)
02902 ///    %2 = call i8* @objc_retain(i8* %call)
02903 ///    %3 = call i8* @objc_autorelease(i8* %2)
02904 ///    ret i8* %3
02905 /// \endcode
02906 /// And delete the retain and autorelease.
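//
// After the transformation (illustrative, not part of the original source),
// the retain and autorelease are erased and the return value comes straight
// from the originating call:
//
//   %call = call i8* @something(...)
//   ret i8* %call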
02907 void ObjCARCOpt::OptimizeReturns(Function &F) {
02908   if (!F.getReturnType()->isPointerTy())
02909     return;
02910 
02911   DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
02912 
02913   SmallPtrSet<Instruction *, 4> DependingInstructions;
02914   SmallPtrSet<const BasicBlock *, 4> Visited;
02915   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
02916     BasicBlock *BB = FI;
02917     ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
02918 
02919     if (!Ret)
02920       continue;
02921 
02922     DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
02923 
02924     const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
02925 
02926     // Look for an ``autorelease'' instruction that is a predecessor of Ret and
02927     // dependent on Arg such that there are no instructions dependent on Arg
02928     // that need a positive ref count in between the autorelease and Ret.
02929     CallInst *Autorelease =
02930       FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
02931                                              DependingInstructions, Visited,
02932                                              PA);
02933     DependingInstructions.clear();
02934     Visited.clear();
02935 
02936     if (!Autorelease)
02937       continue;
02938 
02939     CallInst *Retain =
02940       FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
02941                                         DependingInstructions, Visited, PA);
02942     DependingInstructions.clear();
02943     Visited.clear();
02944 
02945     if (!Retain)
02946       continue;
02947 
02948     // Check that there is nothing that can affect the reference count
02949     // between the retain and the call.  Note that Retain need not be in BB.
02950     bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
02951                                                           DependingInstructions,
02952                                                           Visited, PA);
02953     DependingInstructions.clear();
02954     Visited.clear();
02955 
02956     if (!HasSafePathToCall)
02957       continue;
02958 
02959     // If so, we can zap the retain and autorelease.
02960     Changed = true;
02961     ++NumRets;
02962     DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
02963           << *Autorelease << "\n");
02964     EraseInstruction(Retain);
02965     EraseInstruction(Autorelease);
02966   }
02967 }
02968 
02969 #ifndef NDEBUG
02970 void
02971 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
02972   llvm::Statistic &NumRetains =
02973     AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
02974   llvm::Statistic &NumReleases =
02975     AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
02976 
02977   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
02978     Instruction *Inst = &*I++;
02979     switch (GetBasicInstructionClass(Inst)) {
02980     default:
02981       break;
02982     case IC_Retain:
02983       ++NumRetains;
02984       break;
02985     case IC_Release:
02986       ++NumReleases;
02987       break;
02988     }
02989   }
02990 }
02991 #endif
02992 
02993 bool ObjCARCOpt::doInitialization(Module &M) {
02994   if (!EnableARCOpts)
02995     return false;
02996 
02997   // If nothing in the Module uses ARC, don't do anything.
02998   Run = ModuleHasARC(M);
02999   if (!Run)
03000     return false;
03001 
03002   // Identify the imprecise release metadata kind.
03003   ImpreciseReleaseMDKind =
03004     M.getContext().getMDKindID("clang.imprecise_release");
03005   CopyOnEscapeMDKind =
03006     M.getContext().getMDKindID("clang.arc.copy_on_escape");
03007   NoObjCARCExceptionsMDKind =
03008     M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
03009 #ifdef ARC_ANNOTATIONS
03010   ARCAnnotationBottomUpMDKind =
03011     M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
03012   ARCAnnotationTopDownMDKind =
03013     M.getContext().getMDKindID("llvm.arc.annotation.topdown");
03014   ARCAnnotationProvenanceSourceMDKind =
03015     M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
03016 #endif // ARC_ANNOTATIONS
03017 
03018   // Intuitively, objc_retain and others are nocapture; however, in practice
03019   // they are not, because they return their argument value. And objc_release
03020   // calls finalizers, which can have arbitrary side effects.
03021 
03022   // Initialize our runtime entry point cache.
03023   EP.Initialize(&M);
03024 
03025   return false;
03026 }
03027 
03028 bool ObjCARCOpt::runOnFunction(Function &F) {
03029   if (!EnableARCOpts)
03030     return false;
03031 
03032   // If nothing in the Module uses ARC, don't do anything.
03033   if (!Run)
03034     return false;
03035 
03036   Changed = false;
03037 
03038   DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
03039         "\n");
03040 
03041   PA.setAA(&getAnalysis<AliasAnalysis>());
03042 
03043 #ifndef NDEBUG
03044   if (AreStatisticsEnabled()) {
03045     GatherStatistics(F, false);
03046   }
03047 #endif
03048 
03049   // This pass performs several distinct transformations. As a compile-time aid
03050   // when compiling code that isn't ObjC, skip these if the relevant ObjC
03051   // library functions aren't declared.
03052 
03053   // Preliminary optimizations. This also computes UsedInThisFunction.
03054   OptimizeIndividualCalls(F);
03055 
03056   // Optimizations for weak pointers.
03057   if (UsedInThisFunction & ((1 << IC_LoadWeak) |
03058                             (1 << IC_LoadWeakRetained) |
03059                             (1 << IC_StoreWeak) |
03060                             (1 << IC_InitWeak) |
03061                             (1 << IC_CopyWeak) |
03062                             (1 << IC_MoveWeak) |
03063                             (1 << IC_DestroyWeak)))
03064     OptimizeWeakCalls(F);
03065 
03066   // Optimizations for retain+release pairs.
03067   if (UsedInThisFunction & ((1 << IC_Retain) |
03068                             (1 << IC_RetainRV) |
03069                             (1 << IC_RetainBlock)))
03070     if (UsedInThisFunction & (1 << IC_Release))
03071       // Run OptimizeSequences until it either stops making changes or
03072       // no retain+release pair nesting is detected.
03073       while (OptimizeSequences(F)) {}
03074 
03075   // Optimizations if objc_autorelease is used.
03076   if (UsedInThisFunction & ((1 << IC_Autorelease) |
03077                             (1 << IC_AutoreleaseRV)))
03078     OptimizeReturns(F);
03079 
03080   // Gather statistics after optimization.
03081 #ifndef NDEBUG
03082   if (AreStatisticsEnabled()) {
03083     GatherStatistics(F, true);
03084   }
03085 #endif
03086 
03087   DEBUG(dbgs() << "\n");
03088 
03089   return Changed;
03090 }
03091 
03092 void ObjCARCOpt::releaseMemory() {
03093   PA.clear();
03094 }
03095 
03096 /// @}
03097 ///