LLVM API Documentation

RegAllocGreedy.cpp
00001 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file defines the RAGreedy function pass for register allocation in
00011 // optimized builds.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 #include "llvm/CodeGen/Passes.h"
00016 #include "AllocationOrder.h"
00017 #include "InterferenceCache.h"
00018 #include "LiveDebugVariables.h"
00019 #include "RegAllocBase.h"
00020 #include "SpillPlacement.h"
00021 #include "Spiller.h"
00022 #include "SplitKit.h"
00023 #include "llvm/ADT/Statistic.h"
00024 #include "llvm/Analysis/AliasAnalysis.h"
00025 #include "llvm/CodeGen/CalcSpillWeights.h"
00026 #include "llvm/CodeGen/EdgeBundles.h"
00027 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
00028 #include "llvm/CodeGen/LiveRangeEdit.h"
00029 #include "llvm/CodeGen/LiveRegMatrix.h"
00030 #include "llvm/CodeGen/LiveStackAnalysis.h"
00031 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
00032 #include "llvm/CodeGen/MachineDominators.h"
00033 #include "llvm/CodeGen/MachineFunctionPass.h"
00034 #include "llvm/CodeGen/MachineLoopInfo.h"
00035 #include "llvm/CodeGen/MachineRegisterInfo.h"
00036 #include "llvm/CodeGen/RegAllocRegistry.h"
00037 #include "llvm/CodeGen/RegisterClassInfo.h"
00038 #include "llvm/CodeGen/VirtRegMap.h"
00039 #include "llvm/IR/LLVMContext.h"
00040 #include "llvm/PassAnalysisSupport.h"
00041 #include "llvm/Support/BranchProbability.h"
00042 #include "llvm/Support/CommandLine.h"
00043 #include "llvm/Support/Debug.h"
00044 #include "llvm/Support/ErrorHandling.h"
00045 #include "llvm/Support/Timer.h"
00046 #include "llvm/Support/raw_ostream.h"
00047 #include "llvm/Target/TargetSubtargetInfo.h"
00048 #include <queue>
00049 
00050 using namespace llvm;
00051 
00052 #define DEBUG_TYPE "regalloc"
00053 
00054 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
00055 STATISTIC(NumLocalSplits,  "Number of split local live ranges");
00056 STATISTIC(NumEvicted,      "Number of interferences evicted");
00057 
00058 static cl::opt<SplitEditor::ComplementSpillMode>
00059 SplitSpillMode("split-spill-mode", cl::Hidden,
00060   cl::desc("Spill mode for splitting live ranges"),
00061   cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
00062              clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
00063              clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
00064              clEnumValEnd),
00065   cl::init(SplitEditor::SM_Partition));
00066 
00067 static cl::opt<unsigned>
00068 LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
00069                              cl::desc("Last chance recoloring max depth"),
00070                              cl::init(5));
00071 
00072 static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
00073     "lcr-max-interf", cl::Hidden,
00074     cl::desc("Last chance recoloring maximum number of considered"
00075              " interference at a time"),
00076     cl::init(8));
00077 
00078 static cl::opt<bool>
00079 ExhaustiveSearch("exhaustive-register-search", cl::NotHidden,
00080                  cl::desc("Exhaustive Search for registers bypassing the depth "
00081                           "and interference cutoffs of last chance recoloring"));
00082 
00083 static cl::opt<bool> EnableLocalReassignment(
00084     "enable-local-reassign", cl::Hidden,
00085     cl::desc("Local reassignment can yield better allocation decisions, but "
00086              "may be compile time intensive"),
00087     cl::init(false));
00088 
00089 // FIXME: Find a good default for this flag and remove the flag.
00090 static cl::opt<unsigned>
00091 CSRFirstTimeCost("regalloc-csr-first-time-cost",
00092               cl::desc("Cost for first time use of callee-saved register."),
00093               cl::init(0), cl::Hidden);
00094 
00095 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
00096                                        createGreedyRegisterAllocator);
00097 
00098 namespace {
00099 class RAGreedy : public MachineFunctionPass,
00100                  public RegAllocBase,
00101                  private LiveRangeEdit::Delegate {
00102   // Convenient shortcuts.
00103   typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
00104   typedef SmallPtrSet<LiveInterval *, 4> SmallLISet;
00105   typedef SmallSet<unsigned, 16> SmallVirtRegSet;
00106 
00107   // context
00108   MachineFunction *MF;
00109 
00110   // Shortcuts to some useful interfaces.
00111   const TargetInstrInfo *TII;
00112   const TargetRegisterInfo *TRI;
00113   RegisterClassInfo RCI;
00114 
00115   // analyses
00116   SlotIndexes *Indexes;
00117   MachineBlockFrequencyInfo *MBFI;
00118   MachineDominatorTree *DomTree;
00119   MachineLoopInfo *Loops;
00120   EdgeBundles *Bundles;
00121   SpillPlacement *SpillPlacer;
00122   LiveDebugVariables *DebugVars;
00123 
00124   // state
00125   std::unique_ptr<Spiller> SpillerInstance;
00126   PQueue Queue;
00127   unsigned NextCascade;
00128 
00129   // Live ranges pass through a number of stages as we try to allocate them.
00130   // Some of the stages may also create new live ranges:
00131   //
00132   // - Region splitting.
00133   // - Per-block splitting.
00134   // - Local splitting.
00135   // - Spilling.
00136   //
00137   // Ranges produced by one of the stages skip the previous stages when they are
00138   // dequeued. This improves performance because we can skip interference checks
00139   // that are unlikely to give any results. It also guarantees that the live
00140   // range splitting algorithm terminates, something that is otherwise hard to
00141   // ensure.
00142   enum LiveRangeStage {
00143     /// Newly created live range that has never been queued.
00144     RS_New,
00145 
00146     /// Only attempt assignment and eviction. Then requeue as RS_Split.
00147     RS_Assign,
00148 
00149     /// Attempt live range splitting if assignment is impossible.
00150     RS_Split,
00151 
00152     /// Attempt more aggressive live range splitting that is guaranteed to make
00153     /// progress.  This is used for split products that may not be making
00154     /// progress.
00155     RS_Split2,
00156 
00157     /// Live range will be spilled.  No more splitting will be attempted.
00158     RS_Spill,
00159 
00160     /// There is nothing more we can do to this live range.  Abort compilation
00161     /// if it can't be assigned.
00162     RS_Done
00163   };
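  // For illustration: a typical journey through these stages. A fresh vreg
  // enters as RS_New, is promoted to RS_Assign when first enqueued, becomes
  // RS_Split if neither assignment nor eviction works, and its split products
  // start over at RS_New (or RS_Split2 when progress is doubtful) until the
  // range is marked RS_Spill and finally RS_Done. Stages only ever advance,
  // which is what guarantees termination.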
00164 
00165   // Enum CutOffStage keeps track of whether register allocation failed
00166   // because of the cutoffs encountered in last chance recoloring.
00167   // Note: This is used as a bitmask. Any new value should be the next power of 2.
00168   enum CutOffStage {
00169     // No cutoffs encountered
00170     CO_None = 0,
00171 
00172     // lcr-max-depth cutoff encountered
00173     CO_Depth = 1,
00174 
00175     // lcr-max-interf cutoff encountered
00176     CO_Interf = 2
00177   };
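  // For illustration: the cutoff stages combine as flags, e.g.
  //   CutOffInfo |= CO_Depth;                // lcr-max-depth cutoff was hit
  //   if ((CutOffInfo & CO_Interf) == 0) ... // interference cutoff not hit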
00178 
00179   uint8_t CutOffInfo;
00180 
00181 #ifndef NDEBUG
00182   static const char *const StageName[];
00183 #endif
00184 
00185   // RegInfo - Keep additional information about each live range.
00186   struct RegInfo {
00187     LiveRangeStage Stage;
00188 
00189     // Cascade - Eviction loop prevention. See canEvictInterference().
00190     unsigned Cascade;
00191 
00192     RegInfo() : Stage(RS_New), Cascade(0) {}
00193   };
00194 
00195   IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
00196 
00197   LiveRangeStage getStage(const LiveInterval &VirtReg) const {
00198     return ExtraRegInfo[VirtReg.reg].Stage;
00199   }
00200 
00201   void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
00202     ExtraRegInfo.resize(MRI->getNumVirtRegs());
00203     ExtraRegInfo[VirtReg.reg].Stage = Stage;
00204   }
00205 
00206   template<typename Iterator>
00207   void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
00208     ExtraRegInfo.resize(MRI->getNumVirtRegs());
00209     for (;Begin != End; ++Begin) {
00210       unsigned Reg = *Begin;
00211       if (ExtraRegInfo[Reg].Stage == RS_New)
00212         ExtraRegInfo[Reg].Stage = NewStage;
00213     }
00214   }
00215 
00216   /// Cost of evicting interference.
00217   struct EvictionCost {
00218     unsigned BrokenHints; ///< Total number of broken hints.
00219     float MaxWeight;      ///< Maximum spill weight evicted.
00220 
00221     EvictionCost(): BrokenHints(0), MaxWeight(0) {}
00222 
00223     bool isMax() const { return BrokenHints == ~0u; }
00224 
00225     void setMax() { BrokenHints = ~0u; }
00226 
00227     void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }
00228 
00229     bool operator<(const EvictionCost &O) const {
00230       return std::tie(BrokenHints, MaxWeight) <
00231              std::tie(O.BrokenHints, O.MaxWeight);
00232     }
00233   };
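  // For illustration: std::tie yields a lexicographic order, so a cost that
  // breaks fewer hints always wins and MaxWeight only decides ties, e.g.
  //   EvictionCost A, B;
  //   A.BrokenHints = 0; A.MaxWeight = 5.0f;
  //   B.BrokenHints = 1; B.MaxWeight = 0.1f;
  //   assert(A < B); // fewer broken hints beats a lighter max weight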
00234 
00235   // splitting state.
00236   std::unique_ptr<SplitAnalysis> SA;
00237   std::unique_ptr<SplitEditor> SE;
00238 
00239   /// Cached per-block interference maps
00240   InterferenceCache IntfCache;
00241 
00242   /// All basic blocks where the current register has uses.
00243   SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
00244 
00245   /// Global live range splitting candidate info.
00246   struct GlobalSplitCandidate {
00247     // Register intended for assignment, or 0.
00248     unsigned PhysReg;
00249 
00250     // SplitKit interval index for this candidate.
00251     unsigned IntvIdx;
00252 
00253     // Interference for PhysReg.
00254     InterferenceCache::Cursor Intf;
00255 
00256     // Bundles where this candidate should be live.
00257     BitVector LiveBundles;
00258     SmallVector<unsigned, 8> ActiveBlocks;
00259 
00260     void reset(InterferenceCache &Cache, unsigned Reg) {
00261       PhysReg = Reg;
00262       IntvIdx = 0;
00263       Intf.setPhysReg(Cache, Reg);
00264       LiveBundles.clear();
00265       ActiveBlocks.clear();
00266     }
00267 
00268     // Set B[i] = C for every live bundle where B[i] was NoCand.
00269     unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
00270       unsigned Count = 0;
00271       for (int i = LiveBundles.find_first(); i >= 0;
00272            i = LiveBundles.find_next(i))
00273         if (B[i] == NoCand) {
00274           B[i] = C;
00275           Count++;
00276         }
00277       return Count;
00278     }
00279   };
00280 
00281   /// Candidate info for each PhysReg in AllocationOrder.
00282   /// This vector never shrinks, but grows to the size of the largest register
00283   /// class.
00284   SmallVector<GlobalSplitCandidate, 32> GlobalCand;
00285 
00286   enum : unsigned { NoCand = ~0u };
00287 
00288   /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
00289   /// NoCand which indicates the stack interval.
00290   SmallVector<unsigned, 32> BundleCand;
00291 
00292   /// Callee-save register cost, calculated once per machine function.
00293   BlockFrequency CSRCost;
00294 
00295   /// Whether to run the local reassignment heuristic. This information is
00296   /// obtained from the TargetSubtargetInfo.
00297   bool EnableLocalReassign;
00298 
00299 public:
00300   RAGreedy();
00301 
00302   /// Return the pass name.
00303   const char* getPassName() const override {
00304     return "Greedy Register Allocator";
00305   }
00306 
00307   /// RAGreedy analysis usage.
00308   void getAnalysisUsage(AnalysisUsage &AU) const override;
00309   void releaseMemory() override;
00310   Spiller &spiller() override { return *SpillerInstance; }
00311   void enqueue(LiveInterval *LI) override;
00312   LiveInterval *dequeue() override;
00313   unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
00314 
00315   /// Perform register allocation.
00316   bool runOnMachineFunction(MachineFunction &mf) override;
00317 
00318   static char ID;
00319 
00320 private:
00321   unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
00322                              SmallVirtRegSet &, unsigned = 0);
00323 
00324   bool LRE_CanEraseVirtReg(unsigned) override;
00325   void LRE_WillShrinkVirtReg(unsigned) override;
00326   void LRE_DidCloneVirtReg(unsigned, unsigned) override;
00327   void enqueue(PQueue &CurQueue, LiveInterval *LI);
00328   LiveInterval *dequeue(PQueue &CurQueue);
00329 
00330   BlockFrequency calcSpillCost();
00331   bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
00332   void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
00333   void growRegion(GlobalSplitCandidate &Cand);
00334   BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
00335   bool calcCompactRegion(GlobalSplitCandidate&);
00336   void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
00337   void calcGapWeights(unsigned, SmallVectorImpl<float>&);
00338   unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
00339   bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
00340   bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
00341   void evictInterference(LiveInterval&, unsigned,
00342                          SmallVectorImpl<unsigned>&);
00343   bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
00344                                   SmallLISet &RecoloringCandidates,
00345                                   const SmallVirtRegSet &FixedRegisters);
00346 
00347   unsigned tryAssign(LiveInterval&, AllocationOrder&,
00348                      SmallVectorImpl<unsigned>&);
00349   unsigned tryEvict(LiveInterval&, AllocationOrder&,
00350                     SmallVectorImpl<unsigned>&, unsigned = ~0u);
00351   unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
00352                           SmallVectorImpl<unsigned>&);
00353   /// Calculate cost of region splitting.
00354   unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
00355                                     AllocationOrder &Order,
00356                                     BlockFrequency &BestCost,
00357                                     unsigned &NumCands, bool IgnoreCSR);
00358   /// Perform region splitting.
00359   unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
00360                          bool HasCompact,
00361                          SmallVectorImpl<unsigned> &NewVRegs);
00362   /// Check other options before using a callee-saved register for the first
00363   /// time.
00364   unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
00365                                  unsigned PhysReg, unsigned &CostPerUseLimit,
00366                                  SmallVectorImpl<unsigned> &NewVRegs);
00367   void initializeCSRCost();
00368   unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
00369                          SmallVectorImpl<unsigned>&);
00370   unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
00371                                SmallVectorImpl<unsigned>&);
00372   unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
00373                          SmallVectorImpl<unsigned>&);
00374   unsigned trySplit(LiveInterval&, AllocationOrder&,
00375                     SmallVectorImpl<unsigned>&);
00376   unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
00377                                    SmallVectorImpl<unsigned> &,
00378                                    SmallVirtRegSet &, unsigned);
00379   bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
00380                                SmallVirtRegSet &, unsigned);
00381 };
00382 } // end anonymous namespace
00383 
00384 char RAGreedy::ID = 0;
00385 
00386 #ifndef NDEBUG
00387 const char *const RAGreedy::StageName[] = {
00388     "RS_New",
00389     "RS_Assign",
00390     "RS_Split",
00391     "RS_Split2",
00392     "RS_Spill",
00393     "RS_Done"
00394 };
00395 #endif
00396 
00397 // Hysteresis to use when comparing floats.
00398 // This helps stabilize decisions based on float comparisons.
00399 const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
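#if 0 // Illustrative sketch, not part of the original file: how such a
      // hysteresis factor is typically applied. A challenger must beat the
      // incumbent by roughly 2% before we switch, which keeps decisions
      // stable when two nearly equal floats are compared repeatedly.
static bool strictlyBetter(float ChallengerCost, float IncumbentCost) {
  return ChallengerCost < IncumbentCost * Hysteresis;
}
#endif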
00400 
00401 
00402 FunctionPass* llvm::createGreedyRegisterAllocator() {
00403   return new RAGreedy();
00404 }
00405 
00406 RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
00407   initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
00408   initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
00409   initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
00411   initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
00412   initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
00413   initializeLiveStacksPass(*PassRegistry::getPassRegistry());
00414   initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
00415   initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
00416   initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
00417   initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
00418   initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
00419   initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
00420 }
00421 
00422 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
00423   AU.setPreservesCFG();
00424   AU.addRequired<MachineBlockFrequencyInfo>();
00425   AU.addPreserved<MachineBlockFrequencyInfo>();
00426   AU.addRequired<AliasAnalysis>();
00427   AU.addPreserved<AliasAnalysis>();
00428   AU.addRequired<LiveIntervals>();
00429   AU.addPreserved<LiveIntervals>();
00430   AU.addRequired<SlotIndexes>();
00431   AU.addPreserved<SlotIndexes>();
00432   AU.addRequired<LiveDebugVariables>();
00433   AU.addPreserved<LiveDebugVariables>();
00434   AU.addRequired<LiveStacks>();
00435   AU.addPreserved<LiveStacks>();
00436   AU.addRequired<MachineDominatorTree>();
00437   AU.addPreserved<MachineDominatorTree>();
00438   AU.addRequired<MachineLoopInfo>();
00439   AU.addPreserved<MachineLoopInfo>();
00440   AU.addRequired<VirtRegMap>();
00441   AU.addPreserved<VirtRegMap>();
00442   AU.addRequired<LiveRegMatrix>();
00443   AU.addPreserved<LiveRegMatrix>();
00444   AU.addRequired<EdgeBundles>();
00445   AU.addRequired<SpillPlacement>();
00446   MachineFunctionPass::getAnalysisUsage(AU);
00447 }
00448 
00449 
00450 //===----------------------------------------------------------------------===//
00451 //                     LiveRangeEdit delegate methods
00452 //===----------------------------------------------------------------------===//
00453 
00454 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
00455   if (VRM->hasPhys(VirtReg)) {
00456     Matrix->unassign(LIS->getInterval(VirtReg));
00457     return true;
00458   }
00459   // Unassigned virtreg is probably in the priority queue.
00460   // RegAllocBase will erase it after dequeueing.
00461   return false;
00462 }
00463 
00464 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
00465   if (!VRM->hasPhys(VirtReg))
00466     return;
00467 
00468   // Register is assigned, put it back on the queue for reassignment.
00469   LiveInterval &LI = LIS->getInterval(VirtReg);
00470   Matrix->unassign(LI);
00471   enqueue(&LI);
00472 }
00473 
00474 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
00475   // Cloning a register we haven't even heard about yet?  Just ignore it.
00476   if (!ExtraRegInfo.inBounds(Old))
00477     return;
00478 
00479   // LRE may clone a virtual register because dead code elimination causes it
00480   // to be split into connected components. The new components are much
00481   // smaller than the original, so they should get a new chance at being
00482   // assigned; the clone inherits the stage of the parent.
00483   ExtraRegInfo[Old].Stage = RS_Assign;
00484   ExtraRegInfo.grow(New);
00485   ExtraRegInfo[New] = ExtraRegInfo[Old];
00486 }
00487 
00488 void RAGreedy::releaseMemory() {
00489   SpillerInstance.reset();
00490   ExtraRegInfo.clear();
00491   GlobalCand.clear();
00492 }
00493 
00494 void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }
00495 
00496 void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
00497   // Prioritize live ranges by size, assigning larger ranges first.
00498   // The queue holds (size, reg) pairs.
00499   const unsigned Size = LI->getSize();
00500   const unsigned Reg = LI->reg;
00501   assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
00502          "Can only enqueue virtual registers");
00503   unsigned Prio;
00504 
00505   ExtraRegInfo.grow(Reg);
00506   if (ExtraRegInfo[Reg].Stage == RS_New)
00507     ExtraRegInfo[Reg].Stage = RS_Assign;
00508 
00509   if (ExtraRegInfo[Reg].Stage == RS_Split) {
00510     // Unsplit ranges that couldn't be allocated immediately are deferred until
00511     // everything else has been allocated.
00512     Prio = Size;
00513   } else {
00514     // Giant live ranges fall back to the global assignment heuristic, which
00515     // prevents excessive spilling in pathological cases.
00516     bool ReverseLocal = TRI->reverseLocalAssignment();
00517     bool ForceGlobal = !ReverseLocal && TRI->mayOverrideLocalAssignment() &&
00518       (Size / SlotIndex::InstrDist) > (2 * MRI->getRegClass(Reg)->getNumRegs());
00519 
00520     if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
00521         LIS->intervalIsInOneMBB(*LI)) {
00522       // Allocate original local ranges in linear instruction order. Since they
00523       // are singly defined, this produces optimal coloring in the absence of
00524       // global interference and other constraints.
00525       if (!ReverseLocal)
00526         Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
00527       else {
00528         // Allocating bottom up may allow many short live ranges to be assigned first
00529         // to one of the cheap registers. This could be much faster for very
00530         // large blocks on targets with many physical registers.
00531         Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex());
00532       }
00533     }
00534     else {
00535       // Allocate global and split ranges in long->short order. Long ranges that
00536       // don't fit should be spilled (or split) ASAP so they don't create
00537       // interference.  Mark a bit to prioritize global above local ranges.
00538       Prio = (1u << 29) + Size;
00539     }
00540     // Mark a higher bit to prioritize global and local above RS_Split.
00541     Prio |= (1u << 31);
00542 
00543     // Boost ranges that have a physical register hint.
00544     if (VRM->hasKnownPreference(Reg))
00545       Prio |= (1u << 30);
00546   }
00547   // The virtual register number is a tie breaker for same-sized ranges.
00548   // Give lower vreg numbers higher priority to assign them first.
00549   CurQueue.push(std::make_pair(Prio, ~Reg));
00550 }
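// For illustration, the priority word packs flag bits above the size so the
// max-heap pops ranges in the intended order, and the second pair element is
// ~Reg so that among equal priorities the smaller vreg number compares larger
// and is dequeued first. A hypothetical restatement of the encoding:
#if 0
static unsigned examplePrio(bool NotDeferred, bool Hinted, bool Global,
                            unsigned Size) {
  unsigned Prio = Size & ((1u << 29) - 1); // low bits: size or distance
  if (Global)
    Prio |= 1u << 29;                      // global above local ranges
  if (Hinted)
    Prio |= 1u << 30;                      // boost ranges with a phys-reg hint
  if (NotDeferred)
    Prio |= 1u << 31;                      // everything above RS_Split ranges
  return Prio;
}
#endif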
00551 
00552 LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }
00553 
00554 LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
00555   if (CurQueue.empty())
00556     return nullptr;
00557   LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
00558   CurQueue.pop();
00559   return LI;
00560 }
00561 
00562 
00563 //===----------------------------------------------------------------------===//
00564 //                            Direct Assignment
00565 //===----------------------------------------------------------------------===//
00566 
00567 /// tryAssign - Try to assign VirtReg to an available register.
00568 unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
00569                              AllocationOrder &Order,
00570                              SmallVectorImpl<unsigned> &NewVRegs) {
00571   Order.rewind();
00572   unsigned PhysReg;
00573   while ((PhysReg = Order.next()))
00574     if (!Matrix->checkInterference(VirtReg, PhysReg))
00575       break;
00576   if (!PhysReg || Order.isHint())
00577     return PhysReg;
00578 
00579   // PhysReg is available, but there may be a better choice.
00580 
00581   // If we missed a simple hint, try to cheaply evict interference from the
00582   // preferred register.
00583   if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
00584     if (Order.isHint(Hint)) {
00585       DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
00586       EvictionCost MaxCost;
00587       MaxCost.setBrokenHints(1);
00588       if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
00589         evictInterference(VirtReg, Hint, NewVRegs);
00590         return Hint;
00591       }
00592     }
00593 
00594   // Try to evict interference from a cheaper alternative.
00595   unsigned Cost = TRI->getCostPerUse(PhysReg);
00596 
00597   // Most registers have 0 additional cost.
00598   if (!Cost)
00599     return PhysReg;
00600 
00601   DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
00602                << '\n');
00603   unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
00604   return CheapReg ? CheapReg : PhysReg;
00605 }
00606 
00607 
00608 //===----------------------------------------------------------------------===//
00609 //                         Interference eviction
00610 //===----------------------------------------------------------------------===//
00611 
00612 unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
00613   AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
00614   unsigned PhysReg;
00615   while ((PhysReg = Order.next())) {
00616     if (PhysReg == PrevReg)
00617       continue;
00618 
00619     MCRegUnitIterator Units(PhysReg, TRI);
00620     for (; Units.isValid(); ++Units) {
00621       // Instantiate a "subquery", not to be confused with the Queries array.
00622       LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
00623       if (subQ.checkInterference())
00624         break;
00625     }
00626     // If no units have interference, break out with the current PhysReg.
00627     if (!Units.isValid())
00628       break;
00629   }
00630   if (PhysReg)
00631     DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
00632           << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
00633           << '\n');
00634   return PhysReg;
00635 }
00636 
00637 /// shouldEvict - determine if A should evict the assigned live range B. The
00638 /// eviction policy defined by this function together with the allocation order
00639 /// defined by enqueue() decides which registers ultimately end up being split
00640 /// and spilled.
00641 ///
00642 /// Cascade numbers are used to prevent infinite loops in case the eviction
00643 /// relation defined by this function is cyclic.
00644 ///
00645 /// @param A          The live range to be assigned.
00646 /// @param IsHint     True when A is about to be assigned to its preferred
00647 ///                   register.
00648 /// @param B          The live range to be evicted.
00649 /// @param BreaksHint True when B is already assigned to its preferred register.
00650 bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
00651                            LiveInterval &B, bool BreaksHint) {
00652   bool CanSplit = getStage(B) < RS_Spill;
00653 
00654   // Be fairly aggressive about following hints as long as the evictee can be
00655   // split.
00656   if (CanSplit && IsHint && !BreaksHint)
00657     return true;
00658 
00659   if (A.weight > B.weight) {
00660     DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
00661     return true;
00662   }
00663   return false;
00664 }
00665 
00666 /// canEvictInterference - Return true if all interferences between VirtReg and
00667 /// PhysReg can be evicted.
00668 ///
00669 /// @param VirtReg Live range that is about to be assigned.
00670 /// @param PhysReg Desired register for assignment.
00671 /// @param IsHint  True when PhysReg is VirtReg's preferred register.
00672 /// @param MaxCost Only look for cheaper candidates and update with new cost
00673 ///                when returning true.
00674 /// @returns True when interference can be evicted cheaper than MaxCost.
00675 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
00676                                     bool IsHint, EvictionCost &MaxCost) {
00677   // It is only possible to evict virtual register interference.
00678   if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
00679     return false;
00680 
00681   bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
00682 
00683   // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
00684   // involved in an eviction before. If a cascade number was assigned, deny
00685   // evicting anything with the same or a newer cascade number. This prevents
00686   // infinite eviction loops.
00687   //
00688   // This works out so a register without a cascade number is allowed to evict
00689   // anything, and it can be evicted by anything.
00690   unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
00691   if (!Cascade)
00692     Cascade = NextCascade;
00693 
00694   EvictionCost Cost;
00695   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
00696     LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
00697     // If there are 10 or more interferences, chances are one is heavier.
00698     if (Q.collectInterferingVRegs(10) >= 10)
00699       return false;
00700 
00701     // Check if any interfering live range is heavier than MaxWeight.
00702     for (unsigned i = Q.interferingVRegs().size(); i; --i) {
00703       LiveInterval *Intf = Q.interferingVRegs()[i - 1];
00704       assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
00705              "Only expecting virtual register interference from query");
00706       // Never evict spill products. They cannot split or spill.
00707       if (getStage(*Intf) == RS_Done)
00708         return false;
00709       // Once a live range becomes small enough, it is urgent that we find a
00710       // register for it. This is indicated by an infinite spill weight. These
00711       // urgent live ranges get to evict almost anything.
00712       //
00713       // Also allow urgent evictions of unspillable ranges from a strictly
00714       // larger allocation order.
00715       bool Urgent = !VirtReg.isSpillable() &&
00716         (Intf->isSpillable() ||
00717          RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
00718          RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
00719       // Only evict older cascades or live ranges without a cascade.
00720       unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
00721       if (Cascade <= IntfCascade) {
00722         if (!Urgent)
00723           return false;
00724         // We permit breaking cascades for urgent evictions. It should be the
00725         // last resort, though, so make it really expensive.
00726         Cost.BrokenHints += 10;
00727       }
00728       // Would this break a satisfied hint?
00729       bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
00730       // Update eviction cost.
00731       Cost.BrokenHints += BreaksHint;
00732       Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
00733       // Abort if this would be too expensive.
00734       if (!(Cost < MaxCost))
00735         return false;
00736       if (Urgent)
00737         continue;
00738       // Apply the eviction policy for non-urgent evictions.
00739       if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
00740         return false;
00741       // If !MaxCost.isMax(), then we're just looking for a cheap register.
00742       // Evicting another local live range in this case could lead to suboptimal
00743       // coloring.
00744       if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
00745           (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
00746         return false;
00747       }
00748     }
00749   }
00750   MaxCost = Cost;
00751   return true;
00752 }
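// Worked example (hypothetical, for illustration): let VirtReg carry
// Cascade 3 and let PhysReg's current assignee Intf carry Cascade 2. Since
// 3 > 2, the "Cascade <= IntfCascade" test fails and the eviction is
// allowed; evictInterference() then stamps Intf with Cascade 3. Should Intf
// later try to evict VirtReg back, 3 <= 3 holds and the eviction is refused
// (unless urgent), so no eviction loop can form.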
00753 
00754 /// evictInterference - Evict any interfering registers that prevent VirtReg
00755 /// from being assigned to PhysReg. This assumes that canEvictInterference
00756 /// returned true.
00757 void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
00758                                  SmallVectorImpl<unsigned> &NewVRegs) {
00759   // Make sure that VirtReg has a cascade number, and assign that cascade
00760   // number to every evicted register. These live ranges can then only be
00761   // evicted by a newer cascade, preventing infinite loops.
00762   unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
00763   if (!Cascade)
00764     Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
00765 
00766   DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
00767                << " interference: Cascade " << Cascade << '\n');
00768 
00769   // Collect all interfering virtregs first.
00770   SmallVector<LiveInterval*, 8> Intfs;
00771   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
00772     LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
00773     assert(Q.seenAllInterferences() && "Didn't check all interferences.");
00774     ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
00775     Intfs.append(IVR.begin(), IVR.end());
00776   }
00777 
00778   // Evict them second. This will invalidate the queries.
00779   for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
00780     LiveInterval *Intf = Intfs[i];
00781     // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
00782     if (!VRM->hasPhys(Intf->reg))
00783       continue;
00784     Matrix->unassign(*Intf);
00785     assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
00786             VirtReg.isSpillable() < Intf->isSpillable()) &&
00787            "Cannot decrease cascade number, illegal eviction");
00788     ExtraRegInfo[Intf->reg].Cascade = Cascade;
00789     ++NumEvicted;
00790     NewVRegs.push_back(Intf->reg);
00791   }
00792 }
00793 
00794 /// tryEvict - Try to evict all interferences for a physreg.
00795 /// @param  VirtReg Currently unassigned virtual register.
00796 /// @param  Order   Physregs to try.
00797 /// @return         Physreg to assign VirtReg, or 0.
00798 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
00799                             AllocationOrder &Order,
00800                             SmallVectorImpl<unsigned> &NewVRegs,
00801                             unsigned CostPerUseLimit) {
00802   NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
00803 
00804   // Keep track of the cheapest interference seen so far.
00805   EvictionCost BestCost;
00806   BestCost.setMax();
00807   unsigned BestPhys = 0;
00808   unsigned OrderLimit = Order.getOrder().size();
00809 
00810   // When we are just looking for a reduced cost per use, don't break any
00811   // hints, and only evict smaller spill weights.
00812   if (CostPerUseLimit < ~0u) {
00813     BestCost.BrokenHints = 0;
00814     BestCost.MaxWeight = VirtReg.weight;
00815 
00816     // Check if any registers in RC are below CostPerUseLimit.
00817     const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
00818     unsigned MinCost = RegClassInfo.getMinCost(RC);
00819     if (MinCost >= CostPerUseLimit) {
00820       DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost
00821                    << ", no cheaper registers to be found.\n");
00822       return 0;
00823     }
00824 
00825     // It is normal for register classes to have a long tail of registers with
00826     // the same cost. We don't need to look at them if they're too expensive.
00827     if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
00828       OrderLimit = RegClassInfo.getLastCostChange(RC);
00829       DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
00830     }
00831   }
00832 
00833   Order.rewind();
00834   while (unsigned PhysReg = Order.next(OrderLimit)) {
00835     if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
00836       continue;
00837     // The first use of a callee-saved register in a function has cost 1.
00838     // Don't start using a CSR when the CostPerUseLimit is low.
00839     if (CostPerUseLimit == 1)
00840       if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
00841         if (!MRI->isPhysRegUsed(CSR)) {
00842           DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
00843                        << PrintReg(CSR, TRI) << '\n');
00844           continue;
00845         }
00846 
00847     if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
00848       continue;
00849 
00850     // Best so far.
00851     BestPhys = PhysReg;
00852 
00853     // Stop if the hint can be used.
00854     if (Order.isHint())
00855       break;
00856   }
00857 
00858   if (!BestPhys)
00859     return 0;
00860 
00861   evictInterference(VirtReg, BestPhys, NewVRegs);
00862   return BestPhys;
00863 }
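// For illustration: tryAssign() calls this with CostPerUseLimit set to the
// cost of a free register it already found. On x86-64, for instance, the
// REX-encoded registers R8-R15 carry a nonzero cost per use; if only R8 is
// free, tryEvict() checks whether cheaply evicting a small live range from a
// cost-0 register such as RAX beats paying R8's encoding penalty.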
00864 
00865 
00866 //===----------------------------------------------------------------------===//
00867 //                              Region Splitting
00868 //===----------------------------------------------------------------------===//
00869 
00870 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
00871 /// interference pattern in PhysReg and its aliases. Add the constraints to
00872 /// SpillPlacement and return the static cost of this split in Cost, assuming
00873 /// that all preferences in SplitConstraints are met.
00874 /// Return false if there are no bundles with positive bias.
00875 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
00876                                    BlockFrequency &Cost) {
00877   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
00878 
00879   // Reset interference dependent info.
00880   SplitConstraints.resize(UseBlocks.size());
00881   BlockFrequency StaticCost = 0;
00882   for (unsigned i = 0; i != UseBlocks.size(); ++i) {
00883     const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
00884     SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
00885 
00886     BC.Number = BI.MBB->getNumber();
00887     Intf.moveToBlock(BC.Number);
00888     BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
00889     BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
00890     BC.ChangesValue = BI.FirstDef.isValid();
00891 
00892     if (!Intf.hasInterference())
00893       continue;
00894 
00895     // Number of spill code instructions to insert.
00896     unsigned Ins = 0;
00897 
00898     // Interference for the live-in value.
00899     if (BI.LiveIn) {
00900       if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
00901         BC.Entry = SpillPlacement::MustSpill, ++Ins;
00902       else if (Intf.first() < BI.FirstInstr)
00903         BC.Entry = SpillPlacement::PrefSpill, ++Ins;
00904       else if (Intf.first() < BI.LastInstr)
00905         ++Ins;
00906     }
00907 
00908     // Interference for the live-out value.
00909     if (BI.LiveOut) {
00910       if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
00911         BC.Exit = SpillPlacement::MustSpill, ++Ins;
00912       else if (Intf.last() > BI.LastInstr)
00913         BC.Exit = SpillPlacement::PrefSpill, ++Ins;
00914       else if (Intf.last() > BI.FirstInstr)
00915         ++Ins;
00916     }
00917 
00918     // Accumulate the total frequency of inserted spill code.
00919     while (Ins--)
00920       StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
00921   }
00922   Cost = StaticCost;
00923 
00924   // Add constraints for use-blocks. Note that these are the only constraints
00925   // that may add a positive bias; it is all downhill from here.
00926   SpillPlacer->addConstraints(SplitConstraints);
00927   return SpillPlacer->scanActiveBundles();
00928 }
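// For illustration (hypothetical block): when interference overlaps only the
// live-in value before the first use, the block is summarized roughly as
//   BC.Entry = SpillPlacement::PrefSpill; // enter on the stack, reload once
//   BC.Exit  = SpillPlacement::PrefReg;   // live-out value is unobstructed
// and the single inserted reload is charged at this block's frequency.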
00929 
00930 
00931 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
00932 /// live-through blocks in Blocks.
00933 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
00934                                      ArrayRef<unsigned> Blocks) {
00935   const unsigned GroupSize = 8;
00936   SpillPlacement::BlockConstraint BCS[GroupSize];
00937   unsigned TBS[GroupSize];
00938   unsigned B = 0, T = 0;
00939 
00940   for (unsigned i = 0; i != Blocks.size(); ++i) {
00941     unsigned Number = Blocks[i];
00942     Intf.moveToBlock(Number);
00943 
00944     if (!Intf.hasInterference()) {
00945       assert(T < GroupSize && "Array overflow");
00946       TBS[T] = Number;
00947       if (++T == GroupSize) {
00948         SpillPlacer->addLinks(makeArrayRef(TBS, T));
00949         T = 0;
00950       }
00951       continue;
00952     }
00953 
00954     assert(B < GroupSize && "Array overflow");
00955     BCS[B].Number = Number;
00956 
00957     // Interference for the live-in value.
00958     if (Intf.first() <= Indexes->getMBBStartIdx(Number))
00959       BCS[B].Entry = SpillPlacement::MustSpill;
00960     else
00961       BCS[B].Entry = SpillPlacement::PrefSpill;
00962 
00963     // Interference for the live-out value.
00964     if (Intf.last() >= SA->getLastSplitPoint(Number))
00965       BCS[B].Exit = SpillPlacement::MustSpill;
00966     else
00967       BCS[B].Exit = SpillPlacement::PrefSpill;
00968 
00969     if (++B == GroupSize) {
00970       SpillPlacer->addConstraints(makeArrayRef(BCS, B));
00971       B = 0;
00972     }
00973   }
00974 
00975   SpillPlacer->addConstraints(makeArrayRef(BCS, B));
00976   SpillPlacer->addLinks(makeArrayRef(TBS, T));
00977 }
00978 
00979 void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
00980   // Keep track of through blocks that have not been added to SpillPlacer.
00981   BitVector Todo = SA->getThroughBlocks();
00982   SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
00983   unsigned AddedTo = 0;
00984 #ifndef NDEBUG
00985   unsigned Visited = 0;
00986 #endif
00987 
00988   for (;;) {
00989     ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
00990     // Find new through blocks in the periphery of PrefRegBundles.
00991     for (int i = 0, e = NewBundles.size(); i != e; ++i) {
00992       unsigned Bundle = NewBundles[i];
00993       // Look at all blocks connected to Bundle in the full graph.
00994       ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
00995       for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
00996            I != E; ++I) {
00997         unsigned Block = *I;
00998         if (!Todo.test(Block))
00999           continue;
01000         Todo.reset(Block);
01001         // This is a new through block. Add it to SpillPlacer later.
01002         ActiveBlocks.push_back(Block);
01003 #ifndef NDEBUG
01004         ++Visited;
01005 #endif
01006       }
01007     }
01008     // Any new blocks to add?
01009     if (ActiveBlocks.size() == AddedTo)
01010       break;
01011 
01012     // Compute through constraints from the interference, or assume that all
01013     // through blocks prefer spilling when forming compact regions.
01014     auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
01015     if (Cand.PhysReg)
01016       addThroughConstraints(Cand.Intf, NewBlocks);
01017     else
01018       // Provide a strong negative bias on through blocks to prevent unwanted
01019       // liveness on loop backedges.
01020       SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
01021     AddedTo = ActiveBlocks.size();
01022 
01023     // Perhaps iterating can enable more bundles?
01024     SpillPlacer->iterate();
01025   }
01026   DEBUG(dbgs() << ", v=" << Visited);
01027 }
01028 
01029 /// calcCompactRegion - Compute the set of edge bundles that should be live
01030 /// when splitting the current live range into compact regions.  Compact
01031 /// regions can be computed without looking at interference.  They are the
01032 /// regions formed by removing all the live-through blocks from the live range.
01033 ///
01034 /// Returns false if the current live range is already compact, or if the
01035 /// compact regions would form single block regions anyway.
01036 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
01037   // Without any through blocks, the live range is already compact.
01038   if (!SA->getNumThroughBlocks())
01039     return false;
01040 
01041   // Compact regions don't correspond to any physreg.
01042   Cand.reset(IntfCache, 0);
01043 
01044   DEBUG(dbgs() << "Compact region bundles");
01045 
01046   // Use the spill placer to determine the live bundles. growRegion() pretends
01047   // that all the through blocks have interference when PhysReg is unset.
01048   SpillPlacer->prepare(Cand.LiveBundles);
01049 
01050   // The static split cost will be zero since Cand.Intf reports no interference.
01051   BlockFrequency Cost;
01052   if (!addSplitConstraints(Cand.Intf, Cost)) {
01053     DEBUG(dbgs() << ", none.\n");
01054     return false;
01055   }
01056 
01057   growRegion(Cand);
01058   SpillPlacer->finish();
01059 
01060   if (!Cand.LiveBundles.any()) {
01061     DEBUG(dbgs() << ", none.\n");
01062     return false;
01063   }
01064 
01065   DEBUG({
01066     for (int i = Cand.LiveBundles.find_first(); i>=0;
01067          i = Cand.LiveBundles.find_next(i))
01068     dbgs() << " EB#" << i;
01069     dbgs() << ".\n";
01070   });
01071   return true;
01072 }
01073 
01074 /// calcSpillCost - Compute how expensive it would be to split the live range in
01075 /// SA around all use blocks instead of forming bundle regions.
01076 BlockFrequency RAGreedy::calcSpillCost() {
01077   BlockFrequency Cost = 0;
01078   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
01079   for (unsigned i = 0; i != UseBlocks.size(); ++i) {
01080     const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
01081     unsigned Number = BI.MBB->getNumber();
01082     // We normally only need one spill instruction - a load or a store.
01083     Cost += SpillPlacer->getBlockFrequency(Number);
01084 
01085     // Unless the value is redefined in the block.
01086     if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
01087       Cost += SpillPlacer->getBlockFrequency(Number);
01088   }
01089   return Cost;
01090 }
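// Worked example (hypothetical frequencies): with three use blocks at
// frequencies 8, 4 and 1, the baseline cost is 8 + 4 + 1 = 13; if the
// hottest block also redefines a live-in, live-out value, it needs both a
// reload and a store, raising the total to 13 + 8 = 21.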
01091 
01092 /// calcGlobalSplitCost - Return the global split cost of following the split
01093 /// pattern in LiveBundles. This cost should be added to the local cost of the
01094 /// interference pattern in SplitConstraints.
01095 ///
01096 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
01097   BlockFrequency GlobalCost = 0;
01098   const BitVector &LiveBundles = Cand.LiveBundles;
01099   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
01100   for (unsigned i = 0; i != UseBlocks.size(); ++i) {
01101     const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
01102     SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
01103     bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
01104     bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
01105     unsigned Ins = 0;
01106 
01107     if (BI.LiveIn)
01108       Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
01109     if (BI.LiveOut)
01110       Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
01111     while (Ins--)
01112       GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
01113   }
01114 
01115   for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
01116     unsigned Number = Cand.ActiveBlocks[i];
01117     bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
01118     bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
01119     if (!RegIn && !RegOut)
01120       continue;
01121     if (RegIn && RegOut) {
01122       // We need double spill code if this block has interference.
01123       Cand.Intf.moveToBlock(Number);
01124       if (Cand.Intf.hasInterference()) {
01125         GlobalCost += SpillPlacer->getBlockFrequency(Number);
01126         GlobalCost += SpillPlacer->getBlockFrequency(Number);
01127       }
01128       continue;
01129     }
01130     // live-in / stack-out or stack-in live-out.
01131     GlobalCost += SpillPlacer->getBlockFrequency(Number);
01132   }
01133   return GlobalCost;
01134 }
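// Worked example (hypothetical): a use block whose live-in value arrives on
// the stack (RegIn == false) although the constraint preferred a register
// (BC.Entry == PrefReg) mismatches, charging one reload at that block's
// frequency; a live-through block kept in a register on both sides across
// interference is charged twice, for the spill and the reload around it.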
01135 
01136 /// splitAroundRegion - Split the current live range around the regions
01137 /// determined by BundleCand and GlobalCand.
01138 ///
01139 /// Before calling this function, GlobalCand and BundleCand must be initialized
01140 /// so each bundle is assigned to a valid candidate, or NoCand for the
01141 /// stack-bound bundles.  The shared SA/SE SplitAnalysis and SplitEditor
01142 /// objects must be initialized for the current live range, and intervals
01143 /// created for the used candidates.
01144 ///
01145 /// @param LREdit    The LiveRangeEdit object handling the current split.
01146 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value
01147 ///                  must appear in this list.
01148 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
01149                                  ArrayRef<unsigned> UsedCands) {
01150   // These are the intervals created for new global ranges. We may create more
01151   // intervals for local ranges.
01152   const unsigned NumGlobalIntvs = LREdit.size();
01153   DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
01154   assert(NumGlobalIntvs && "No global intervals configured");
01155 
01156   // Isolate even single instructions when dealing with a proper sub-class.
01157   // That guarantees register class inflation for the stack interval because it
01158   // is all copies.
01159   unsigned Reg = SA->getParent().reg;
01160   bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
01161 
01162   // First handle all the blocks with uses.
01163   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
01164   for (unsigned i = 0; i != UseBlocks.size(); ++i) {
01165     const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
01166     unsigned Number = BI.MBB->getNumber();
01167     unsigned IntvIn = 0, IntvOut = 0;
01168     SlotIndex IntfIn, IntfOut;
01169     if (BI.LiveIn) {
01170       unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
01171       if (CandIn != NoCand) {
01172         GlobalSplitCandidate &Cand = GlobalCand[CandIn];
01173         IntvIn = Cand.IntvIdx;
01174         Cand.Intf.moveToBlock(Number);
01175         IntfIn = Cand.Intf.first();
01176       }
01177     }
01178     if (BI.LiveOut) {
01179       unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
01180       if (CandOut != NoCand) {
01181         GlobalSplitCandidate &Cand = GlobalCand[CandOut];
01182         IntvOut = Cand.IntvIdx;
01183         Cand.Intf.moveToBlock(Number);
01184         IntfOut = Cand.Intf.last();
01185       }
01186     }
01187 
01188     // Create separate intervals for isolated blocks with multiple uses.
01189     if (!IntvIn && !IntvOut) {
01190       DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
01191       if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
01192         SE->splitSingleBlock(BI);
01193       continue;
01194     }
01195 
01196     if (IntvIn && IntvOut)
01197       SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
01198     else if (IntvIn)
01199       SE->splitRegInBlock(BI, IntvIn, IntfIn);
01200     else
01201       SE->splitRegOutBlock(BI, IntvOut, IntfOut);
01202   }
01203 
01204   // Handle live-through blocks. The relevant live-through blocks are stored in
01205   // the ActiveBlocks list with each candidate. We need to filter out
01206   // duplicates.
01207   BitVector Todo = SA->getThroughBlocks();
01208   for (unsigned c = 0; c != UsedCands.size(); ++c) {
01209     ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
01210     for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
01211       unsigned Number = Blocks[i];
01212       if (!Todo.test(Number))
01213         continue;
01214       Todo.reset(Number);
01215 
01216       unsigned IntvIn = 0, IntvOut = 0;
01217       SlotIndex IntfIn, IntfOut;
01218 
01219       unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
01220       if (CandIn != NoCand) {
01221         GlobalSplitCandidate &Cand = GlobalCand[CandIn];
01222         IntvIn = Cand.IntvIdx;
01223         Cand.Intf.moveToBlock(Number);
01224         IntfIn = Cand.Intf.first();
01225       }
01226 
01227       unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
01228       if (CandOut != NoCand) {
01229         GlobalSplitCandidate &Cand = GlobalCand[CandOut];
01230         IntvOut = Cand.IntvIdx;
01231         Cand.Intf.moveToBlock(Number);
01232         IntfOut = Cand.Intf.last();
01233       }
01234       if (!IntvIn && !IntvOut)
01235         continue;
01236       SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
01237     }
01238   }
01239 
01240   ++NumGlobalSplits;
01241 
01242   SmallVector<unsigned, 8> IntvMap;
01243   SE->finish(&IntvMap);
01244   DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
01245 
01246   ExtraRegInfo.resize(MRI->getNumVirtRegs());
01247   unsigned OrigBlocks = SA->getNumLiveBlocks();
01248 
01249   // Sort out the new intervals created by splitting. We get four kinds:
01250   // - Remainder intervals should not be split again.
01251   // - Candidate intervals can be assigned to Cand.PhysReg.
01252   // - Block-local splits are candidates for local splitting.
01253   // - DCE leftovers should go back on the queue.
01254   for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
01255     LiveInterval &Reg = LIS->getInterval(LREdit.get(i));
01256 
01257     // Ignore old intervals from DCE.
01258     if (getStage(Reg) != RS_New)
01259       continue;
01260 
01261     // Remainder interval. Don't try splitting again, spill if it doesn't
01262     // allocate.
01263     if (IntvMap[i] == 0) {
01264       setStage(Reg, RS_Spill);
01265       continue;
01266     }
01267 
01268     // Global intervals. Allow repeated splitting as long as the number of live
01269     // blocks is strictly decreasing.
01270     if (IntvMap[i] < NumGlobalIntvs) {
01271       if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
01272         DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
01273                      << " blocks as original.\n");
01274         // Don't allow repeated splitting as a safeguard against looping.
01275         setStage(Reg, RS_Split2);
01276       }
01277       continue;
01278     }
01279 
01280     // Other intervals are treated as new. This includes local intervals created
01281     // for blocks with multiple uses, and anything created by DCE.
01282   }
01283 
01284   if (VerifyEnabled)
01285     MF->verify(this, "After splitting live range around region");
01286 }
01287 
01288 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
01289                                   SmallVectorImpl<unsigned> &NewVRegs) {
01290   unsigned NumCands = 0;
01291   BlockFrequency BestCost;
01292 
01293   // Check if we can split this live range around a compact region.
01294   bool HasCompact = calcCompactRegion(GlobalCand.front());
01295   if (HasCompact) {
01296     // Yes, keep GlobalCand[0] as the compact region candidate.
01297     NumCands = 1;
01298     BestCost = BlockFrequency::getMaxFrequency();
01299   } else {
01300     // No benefit from the compact region, our fallback will be per-block
01301     // splitting. Make sure we find a solution that is cheaper than spilling.
01302     BestCost = calcSpillCost();
01303     DEBUG(dbgs() << "Cost of isolating all blocks = ";
01304                  MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
01305   }
01306 
01307   unsigned BestCand =
01308       calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
01309                                false/*IgnoreCSR*/);
01310 
01311   // No solutions found, fall back to single block splitting.
01312   if (!HasCompact && BestCand == NoCand)
01313     return 0;
01314 
01315   return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
01316 }
01317 
01318 unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
01319                                             AllocationOrder &Order,
01320                                             BlockFrequency &BestCost,
01321                                             unsigned &NumCands,
01322                                             bool IgnoreCSR) {
01323   unsigned BestCand = NoCand;
01324   Order.rewind();
01325   while (unsigned PhysReg = Order.next()) {
01326     if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
01327       if (IgnoreCSR && !MRI->isPhysRegUsed(CSR))
01328         continue;
01329 
01330     // Discard bad candidates before we run out of interference cache cursors.
01331     // This will only affect register classes with a lot of registers (>32).
01332     if (NumCands == IntfCache.getMaxCursors()) {
01333       unsigned WorstCount = ~0u;
01334       unsigned Worst = 0;
01335       for (unsigned i = 0; i != NumCands; ++i) {
01336         if (i == BestCand || !GlobalCand[i].PhysReg)
01337           continue;
01338         unsigned Count = GlobalCand[i].LiveBundles.count();
01339         if (Count < WorstCount)
01340           Worst = i, WorstCount = Count;
01341       }
01342       --NumCands;
01343       GlobalCand[Worst] = GlobalCand[NumCands];
01344       if (BestCand == NumCands)
01345         BestCand = Worst;
01346     }
01347 
01348     if (GlobalCand.size() <= NumCands)
01349       GlobalCand.resize(NumCands+1);
01350     GlobalSplitCandidate &Cand = GlobalCand[NumCands];
01351     Cand.reset(IntfCache, PhysReg);
01352 
01353     SpillPlacer->prepare(Cand.LiveBundles);
01354     BlockFrequency Cost;
01355     if (!addSplitConstraints(Cand.Intf, Cost)) {
01356       DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
01357       continue;
01358     }
01359     DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
01360                  MBFI->printBlockFreq(dbgs(), Cost));
01361     if (Cost >= BestCost) {
01362       DEBUG({
01363         if (BestCand == NoCand)
01364           dbgs() << " worse than no bundles\n";
01365         else
01366           dbgs() << " worse than "
01367                  << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
01368       });
01369       continue;
01370     }
01371     growRegion(Cand);
01372 
01373     SpillPlacer->finish();
01374 
01375     // No live bundles, defer to splitSingleBlocks().
01376     if (!Cand.LiveBundles.any()) {
01377       DEBUG(dbgs() << " no bundles.\n");
01378       continue;
01379     }
01380 
01381     Cost += calcGlobalSplitCost(Cand);
01382     DEBUG({
01383       dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
01384                                 << " with bundles";
01385       for (int i = Cand.LiveBundles.find_first(); i >= 0;
01386            i = Cand.LiveBundles.find_next(i))
01387         dbgs() << " EB#" << i;
01388       dbgs() << ".\n";
01389     });
01390     if (Cost < BestCost) {
01391       BestCand = NumCands;
01392       BestCost = Cost;
01393     }
01394     ++NumCands;
01395   }
01396   return BestCand;
01397 }
01398 
01399 unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
01400                                  bool HasCompact,
01401                                  SmallVectorImpl<unsigned> &NewVRegs) {
01402   SmallVector<unsigned, 8> UsedCands;
01403   // Prepare split editor.
01404   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
01405   SE->reset(LREdit, SplitSpillMode);
01406 
01407   // Assign all edge bundles to the preferred candidate, or NoCand.
01408   BundleCand.assign(Bundles->getNumBundles(), NoCand);
01409 
01410   // Assign bundles for the best candidate region.
01411   if (BestCand != NoCand) {
01412     GlobalSplitCandidate &Cand = GlobalCand[BestCand];
01413     if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
01414       UsedCands.push_back(BestCand);
01415       Cand.IntvIdx = SE->openIntv();
01416       DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
01417                    << B << " bundles, intv " << Cand.IntvIdx << ".\n");
01418       (void)B;
01419     }
01420   }
01421 
01422   // Assign bundles for the compact region.
01423   if (HasCompact) {
01424     GlobalSplitCandidate &Cand = GlobalCand.front();
01425     assert(!Cand.PhysReg && "Compact region has no physreg");
01426     if (unsigned B = Cand.getBundles(BundleCand, 0)) {
01427       UsedCands.push_back(0);
01428       Cand.IntvIdx = SE->openIntv();
01429       DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
01430                    << Cand.IntvIdx << ".\n");
01431       (void)B;
01432     }
01433   }
01434 
01435   splitAroundRegion(LREdit, UsedCands);
01436   return 0;
01437 }
01438 
01439 
01440 //===----------------------------------------------------------------------===//
01441 //                            Per-Block Splitting
01442 //===----------------------------------------------------------------------===//
01443 
01444 /// tryBlockSplit - Split a global live range around every block with uses. This
01445 /// creates a lot of local live ranges that will be split by tryLocalSplit if
01446 /// they don't allocate.
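///
/// For example (illustrative), a range spanning B0..B2 with uses only in B0
/// and B2 becomes one local interval per use block plus a remainder interval
/// that is sent straight to spilling; see the stage handling below.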
01447 unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
01448                                  SmallVectorImpl<unsigned> &NewVRegs) {
01449   assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
01450   unsigned Reg = VirtReg.reg;
01451   bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
01452   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
01453   SE->reset(LREdit, SplitSpillMode);
01454   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
01455   for (unsigned i = 0; i != UseBlocks.size(); ++i) {
01456     const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
01457     if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
01458       SE->splitSingleBlock(BI);
01459   }
01460   // No blocks were split.
01461   if (LREdit.empty())
01462     return 0;
01463 
01464   // We did split for some blocks.
01465   SmallVector<unsigned, 8> IntvMap;
01466   SE->finish(&IntvMap);
01467 
01468   // Tell LiveDebugVariables about the new ranges.
01469   DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
01470 
01471   ExtraRegInfo.resize(MRI->getNumVirtRegs());
01472 
01473   // Sort out the new intervals created by splitting. The remainder interval
01474   // goes straight to spilling, the new local ranges get to stay RS_New.
01475   for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
01476     LiveInterval &LI = LIS->getInterval(LREdit.get(i));
01477     if (getStage(LI) == RS_New && IntvMap[i] == 0)
01478       setStage(LI, RS_Spill);
01479   }
01480 
01481   if (VerifyEnabled)
01482     MF->verify(this, "After splitting live range around basic blocks");
01483   return 0;
01484 }
01485 
01486 
01487 //===----------------------------------------------------------------------===//
01488 //                         Per-Instruction Splitting
01489 //===----------------------------------------------------------------------===//
01490 
01491 /// Get the number of allocatable registers that match the constraints of \p Reg
01492 /// on \p MI and that are also in \p SuperRC.
01493 static unsigned getNumAllocatableRegsForConstraints(
01494     const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
01495     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
01496     const RegisterClassInfo &RCI) {
01497   assert(SuperRC && "Invalid register class");
01498 
01499   const TargetRegisterClass *ConstrainedRC =
01500       MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
01501                                              /* ExploreBundle */ true);
01502   if (!ConstrainedRC)
01503     return 0;
01504   return RCI.getNumAllocatableRegs(ConstrainedRC);
01505 }
01506 
01507 /// tryInstructionSplit - Split a live range around individual instructions.
01508 /// This is normally not worthwhile since the spiller is doing essentially the
01509 /// same thing. However, when the live range is in a constrained register
01510 /// class, it may help to insert copies such that parts of the live range can
01511 /// be moved to a larger register class.
01512 ///
01513 /// This is similar to spilling to a larger register class.
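///
/// For example (an illustrative x86 scenario), a virtual register constrained
/// to GR32_ABCD by a single 8-bit sub-register use can be split around that
/// instruction so the rest of the live range competes for the wider GR32
/// class.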
01514 unsigned
01515 RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
01516                               SmallVectorImpl<unsigned> &NewVRegs) {
01517   const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
01518   // There is no point to this if there are no larger sub-classes.
01519   if (!RegClassInfo.isProperSubClass(CurRC))
01520     return 0;
01521 
01522   // Always enable split spill mode, since we're effectively spilling to a
01523   // register.
01524   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
01525   SE->reset(LREdit, SplitEditor::SM_Size);
01526 
01527   ArrayRef<SlotIndex> Uses = SA->getUseSlots();
01528   if (Uses.size() <= 1)
01529     return 0;
01530 
01531   DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
01532 
01533   const TargetRegisterClass *SuperRC = TRI->getLargestLegalSuperClass(CurRC);
01534   unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
01535   // Split around every non-copy instruction if this split will relax
01536   // the constraints on the virtual register.
01537   // Otherwise, splitting just inserts uncoalescable copies that do not help
01538   // the allocation.
01539   for (unsigned i = 0; i != Uses.size(); ++i) {
01540     if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
01541       if (MI->isFullCopy() ||
01542           SuperRCNumAllocatableRegs ==
01543               getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
01544                                                   TRI, RCI)) {
01545         DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
01546         continue;
01547       }
01548     SE->openIntv();
01549     SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
01550     SlotIndex SegStop  = SE->leaveIntvAfter(Uses[i]);
01551     SE->useIntv(SegStart, SegStop);
01552   }
01553 
01554   if (LREdit.empty()) {
01555     DEBUG(dbgs() << "All uses were copies.\n");
01556     return 0;
01557   }
01558 
01559   SmallVector<unsigned, 8> IntvMap;
01560   SE->finish(&IntvMap);
01561   DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
01562   ExtraRegInfo.resize(MRI->getNumVirtRegs());
01563 
01564   // Assign all new registers to RS_Spill. This was the last chance.
01565   setStage(LREdit.begin(), LREdit.end(), RS_Spill);
01566   return 0;
01567 }
01568 
01569 
01570 //===----------------------------------------------------------------------===//
01571 //                             Local Splitting
01572 //===----------------------------------------------------------------------===//
01573 
01574 
01575 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
01576 /// in order to use PhysReg between two entries in SA->UseSlots.
01577 ///
01578 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
01579 ///
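/// For example (illustrative slot values), UseSlots = {16, 32, 48} gives two
/// gaps: GapWeight[0] is the heaviest interfering spill weight between slots
/// 16 and 32, and GapWeight[1] the heaviest between slots 32 and 48.
///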
01580 void RAGreedy::calcGapWeights(unsigned PhysReg,
01581                               SmallVectorImpl<float> &GapWeight) {
01582   assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
01583   const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
01584   ArrayRef<SlotIndex> Uses = SA->getUseSlots();
01585   const unsigned NumGaps = Uses.size()-1;
01586 
01587   // Start and end points for the interference check.
01588   SlotIndex StartIdx =
01589     BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
01590   SlotIndex StopIdx =
01591     BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
01592 
01593   GapWeight.assign(NumGaps, 0.0f);
01594 
01595   // Add interference from each overlapping register.
01596   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
01597     if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
01598           .checkInterference())
01599       continue;
01600 
01601     // We know that VirtReg is a continuous interval from FirstInstr to
01602     // LastInstr, so we don't need InterferenceQuery.
01603     //
01604     // Interference that overlaps an instruction is counted in both gaps
01605     // surrounding the instruction. The exception is interference before
01606     // StartIdx and after StopIdx.
01607     //
01608     LiveIntervalUnion::SegmentIter IntI =
01609       Matrix->getLiveUnions()[*Units].find(StartIdx);
01610     for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
01611       // Skip the gaps before IntI.
01612       while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
01613         if (++Gap == NumGaps)
01614           break;
01615       if (Gap == NumGaps)
01616         break;
01617 
01618       // Update the gaps covered by IntI.
01619       const float weight = IntI.value()->weight;
01620       for (; Gap != NumGaps; ++Gap) {
01621         GapWeight[Gap] = std::max(GapWeight[Gap], weight);
01622         if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
01623           break;
01624       }
01625       if (Gap == NumGaps)
01626         break;
01627     }
01628   }
01629 
01630   // Add fixed interference.
01631   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
01632     const LiveRange &LR = LIS->getRegUnit(*Units);
01633     LiveRange::const_iterator I = LR.find(StartIdx);
01634     LiveRange::const_iterator E = LR.end();
01635 
01636     // Same loop as above. Mark any overlapped gaps as huge_valf.
01637     for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
01638       while (Uses[Gap+1].getBoundaryIndex() < I->start)
01639         if (++Gap == NumGaps)
01640           break;
01641       if (Gap == NumGaps)
01642         break;
01643 
01644       for (; Gap != NumGaps; ++Gap) {
01645         GapWeight[Gap] = llvm::huge_valf;
01646         if (Uses[Gap+1].getBaseIndex() >= I->end)
01647           break;
01648       }
01649       if (Gap == NumGaps)
01650         break;
01651     }
01652   }
01653 }
01654 
01655 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
01656 /// basic block.
01657 ///
01658 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
01659                                  SmallVectorImpl<unsigned> &NewVRegs) {
01660   assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
01661   const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
01662 
01663   // Note that it is possible to have an interval that is live-in or live-out
01664   // while only covering a single block: a phi-def can use undef values from
01665   // predecessors, and the block could be a single-block loop.
01666   // We don't bother doing anything clever about such a case, we simply assume
01667   // that the interval is continuous from FirstInstr to LastInstr. We should
01668   // make sure that we don't do anything illegal to such an interval, though.
01669 
01670   ArrayRef<SlotIndex> Uses = SA->getUseSlots();
01671   if (Uses.size() <= 2)
01672     return 0;
01673   const unsigned NumGaps = Uses.size()-1;
01674 
01675   DEBUG({
01676     dbgs() << "tryLocalSplit: ";
01677     for (unsigned i = 0, e = Uses.size(); i != e; ++i)
01678       dbgs() << ' ' << Uses[i];
01679     dbgs() << '\n';
01680   });
01681 
01682   // If VirtReg is live across any register mask operands, compute a list of
01683   // gaps with register masks.
01684   SmallVector<unsigned, 8> RegMaskGaps;
01685   if (Matrix->checkRegMaskInterference(VirtReg)) {
01686     // Get regmask slots for the whole block.
01687     ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
01688     DEBUG(dbgs() << RMS.size() << " regmasks in block:");
01689     // Constrain to VirtReg's live range.
01690     unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
01691                                    Uses.front().getRegSlot()) - RMS.begin();
01692     unsigned re = RMS.size();
01693     for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
01694       // Look for Uses[i] <= RMS <= Uses[i+1].
01695       assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
01696       if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
01697         continue;
01698       // Skip a regmask on the same instruction as the last use. It doesn't
01699       // overlap the live range.
01700       if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
01701         break;
01702       DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
01703       RegMaskGaps.push_back(i);
01704       // Advance ri to the next gap. A regmask on one of the uses counts in
01705       // both gaps.
01706       while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
01707         ++ri;
01708     }
01709     DEBUG(dbgs() << '\n');
01710   }
01711 
01712   // Since we allow local split results to be split again, there is a risk of
01713   // creating infinite loops. It is tempting to require that the new live
01714   // ranges have fewer instructions than the original. That would guarantee
01715   // convergence, but it is too strict. A live range with 3 instructions can be
01716   // split 2+3 (including the COPY), and we want to allow that.
01717   //
01718   // Instead we use these rules:
01719   //
01720   // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
01721   //    noop split, of course).
01722   // 2. Require progress be made for ranges with getStage() == RS_Split2. All
01723   //    the new ranges must have fewer instructions than before the split.
01724   // 3. New ranges with the same number of instructions are marked RS_Split2,
01725   //    smaller ranges are marked RS_New.
01726   //
01727   // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
01728   // excessive splitting and infinite loops.
01729   //
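  // For example (illustrative counts): an RS_Split2 range with 4 uses has 3
  // gaps, so any new range produced here must span fewer than 3 gaps; a
  // candidate covering all 3 gaps again is rejected as non-progress.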
01730   bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
01731 
01732   // Best split candidate.
01733   unsigned BestBefore = NumGaps;
01734   unsigned BestAfter = 0;
01735   float BestDiff = 0;
01736 
01737   const float blockFreq =
01738     SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
01739     (1.0f / MBFI->getEntryFreq());
01740   SmallVector<float, 8> GapWeight;
01741 
01742   Order.rewind();
01743   while (unsigned PhysReg = Order.next()) {
01744     // Keep track of the largest spill weight that would need to be evicted in
01745     // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
01746     calcGapWeights(PhysReg, GapWeight);
01747 
01748     // Remove any gaps with regmask clobbers.
01749     if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
01750       for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
01751         GapWeight[RegMaskGaps[i]] = llvm::huge_valf;
01752 
01753     // Try to find the best sequence of gaps to close.
01754     // The new spill weight must be larger than any gap interference.
01755 
01756     // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
01757     unsigned SplitBefore = 0, SplitAfter = 1;
01758 
01759     // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
01760     // It is the spill weight that needs to be evicted.
01761     float MaxGap = GapWeight[0];
01762 
01763     for (;;) {
01764       // Live before/after split?
01765       const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
01766       const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
01767 
01768       DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
01769                    << Uses[SplitBefore] << '-' << Uses[SplitAfter]
01770                    << " i=" << MaxGap);
01771 
01772       // Stop before the interval gets so big we wouldn't be making progress.
01773       if (!LiveBefore && !LiveAfter) {
01774         DEBUG(dbgs() << " all\n");
01775         break;
01776       }
01777       // Should the interval be extended or shrunk?
01778       bool Shrink = true;
01779 
01780       // How many gaps would the new range have?
01781       unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
01782 
01783       // Legally, without causing looping?
01784       bool Legal = !ProgressRequired || NewGaps < NumGaps;
01785 
01786       if (Legal && MaxGap < llvm::huge_valf) {
01787         // Estimate the new spill weight. Each instruction reads or writes the
01788         // register. Conservatively assume there are no read-modify-write
01789         // instructions.
01790         //
01791         // Try to guess the size of the new interval.
01792         const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
01793                                  Uses[SplitBefore].distance(Uses[SplitAfter]) +
01794                                  (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
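        // Roughly, EstWeight scales as blockFreq times the number of uses in
        // the new range divided by its size in slots, i.e. a use density; a
        // short range with many uses is more likely to beat MaxGap. (A sketch
        // of the intent, not the exact normalization.)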
01795         // Would this split be possible to allocate?
01796         // Never allocate all gaps; we wouldn't be making progress.
01797         DEBUG(dbgs() << " w=" << EstWeight);
01798         if (EstWeight * Hysteresis >= MaxGap) {
01799           Shrink = false;
01800           float Diff = EstWeight - MaxGap;
01801           if (Diff > BestDiff) {
01802             DEBUG(dbgs() << " (best)");
01803             BestDiff = Hysteresis * Diff;
01804             BestBefore = SplitBefore;
01805             BestAfter = SplitAfter;
01806           }
01807         }
01808       }
01809 
01810       // Try to shrink.
01811       if (Shrink) {
01812         if (++SplitBefore < SplitAfter) {
01813           DEBUG(dbgs() << " shrink\n");
01814           // Recompute the max when necessary.
01815           if (GapWeight[SplitBefore - 1] >= MaxGap) {
01816             MaxGap = GapWeight[SplitBefore];
01817             for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
01818               MaxGap = std::max(MaxGap, GapWeight[i]);
01819           }
01820           continue;
01821         }
01822         MaxGap = 0;
01823       }
01824 
01825       // Try to extend the interval.
01826       if (SplitAfter >= NumGaps) {
01827         DEBUG(dbgs() << " end\n");
01828         break;
01829       }
01830 
01831       DEBUG(dbgs() << " extend\n");
01832       MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
01833     }
01834   }
01835 
01836   // Didn't find any candidates?
01837   if (BestBefore == NumGaps)
01838     return 0;
01839 
01840   DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
01841                << '-' << Uses[BestAfter] << ", " << BestDiff
01842                << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
01843 
01844   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
01845   SE->reset(LREdit);
01846 
01847   SE->openIntv();
01848   SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
01849   SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
01850   SE->useIntv(SegStart, SegStop);
01851   SmallVector<unsigned, 8> IntvMap;
01852   SE->finish(&IntvMap);
01853   DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
01854 
01855   // If the new range has the same number of instructions as before, mark it as
01856   // RS_Split2 so the next split will be forced to make progress. Otherwise,
01857   // leave the new intervals as RS_New so they can compete.
01858   bool LiveBefore = BestBefore != 0 || BI.LiveIn;
01859   bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
01860   unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
01861   if (NewGaps >= NumGaps) {
01862     DEBUG(dbgs() << "Tagging non-progress ranges: ");
01863     assert(!ProgressRequired && "Didn't make progress when it was required.");
01864     for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
01865       if (IntvMap[i] == 1) {
01866         setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
01867         DEBUG(dbgs() << PrintReg(LREdit.get(i)));
01868       }
01869     DEBUG(dbgs() << '\n');
01870   }
01871   ++NumLocalSplits;
01872 
01873   return 0;
01874 }
01875 
01876 //===----------------------------------------------------------------------===//
01877 //                          Live Range Splitting
01878 //===----------------------------------------------------------------------===//
01879 
01880 /// trySplit - Try to split VirtReg or one of its interferences, making it
01881 /// assignable.
01882 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
01883 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
01884                             SmallVectorImpl<unsigned> &NewVRegs) {
01885   // Ranges must be at stage RS_Split2 or earlier.
01886   if (getStage(VirtReg) >= RS_Spill)
01887     return 0;
01888 
01889   // Local intervals are handled separately.
01890   if (LIS->intervalIsInOneMBB(VirtReg)) {
01891     NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
01892     SA->analyze(&VirtReg);
01893     unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
01894     if (PhysReg || !NewVRegs.empty())
01895       return PhysReg;
01896     return tryInstructionSplit(VirtReg, Order, NewVRegs);
01897   }
01898 
01899   NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
01900 
01901   SA->analyze(&VirtReg);
01902 
01903   // FIXME: SplitAnalysis may repair broken live ranges coming from the
01904   // coalescer. That may cause the range to become allocatable which means that
01905   // tryRegionSplit won't be making progress. This check should be replaced with
01906   // an assertion when the coalescer is fixed.
01907   if (SA->didRepairRange()) {
01908     // VirtReg has changed, so all cached queries are invalid.
01909     Matrix->invalidateVirtRegs();
01910     if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
01911       return PhysReg;
01912   }
01913 
01914   // First try to split around a region spanning multiple blocks. RS_Split2
01915   // ranges already made dubious progress with region splitting, so they go
01916   // straight to single block splitting.
01917   if (getStage(VirtReg) < RS_Split2) {
01918     unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
01919     if (PhysReg || !NewVRegs.empty())
01920       return PhysReg;
01921   }
01922 
01923   // Then isolate blocks.
01924   return tryBlockSplit(VirtReg, Order, NewVRegs);
01925 }
01926 
01927 //===----------------------------------------------------------------------===//
01928 //                          Last Chance Recoloring
01929 //===----------------------------------------------------------------------===//
01930 
01931 /// mayRecolorAllInterferences - Check if the virtual registers that
01932 /// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
01933 /// recolored to free \p PhysReg.
01934 /// When true is returned, \p RecoloringCandidates has been augmented with all
01935 /// the live intervals that need to be recolored in order to free \p PhysReg
01936 /// for \p VirtReg.
01937 /// \p FixedRegisters contains all the virtual registers that cannot be
01938 /// recolored.
01939 bool
01940 RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
01941                                      SmallLISet &RecoloringCandidates,
01942                                      const SmallVirtRegSet &FixedRegisters) {
01943   const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
01944 
01945   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
01946     LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
01947     // If there are LastChanceRecoloringMaxInterference or more interferences,
01948     // chances are one would not be recolorable.
01949     if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
01950         LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
01951       DEBUG(dbgs() << "Early abort: too many interferences.\n");
01952       CutOffInfo |= CO_Interf;
01953       return false;
01954     }
01955     for (unsigned i = Q.interferingVRegs().size(); i; --i) {
01956       LiveInterval *Intf = Q.interferingVRegs()[i - 1];
01957       // If Intf is done and sits on the same register class as VirtReg,
01958       // it would not be recolorable as it is in the same state as VirtReg.
01959       if ((getStage(*Intf) == RS_Done &&
01960            MRI->getRegClass(Intf->reg) == CurRC) ||
01961           FixedRegisters.count(Intf->reg)) {
01962         DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
01963         return false;
01964       }
01965       RecoloringCandidates.insert(Intf);
01966     }
01967   }
01968   return true;
01969 }
01970 
01971 /// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
01972 /// its interferences.
01973 /// Last chance recoloring chooses a color for \p VirtReg and recolors every
01974 /// virtual register that was using it. The recoloring process may recursively
01975 /// use the last chance recoloring. Therefore, when a virtual register has been
01976 /// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
01977 /// be last-chance-recolored again during this recoloring "session".
01978 /// E.g.,
01979 /// Let
01980 /// vA can use {R1, R2    }
01981 /// vB can use {    R2, R3}
01982 /// vC can use {R1        }
01983 /// Where vA, vB, and vC cannot be split anymore (they are reloads for
01984 /// instance) and they all interfere.
01985 ///
01986 /// vA is assigned R1
01987 /// vB is assigned R2
01988 /// vC tries to evict vA but vA is already done.
01989 /// Regular register allocation fails.
01990 ///
01991 /// Last chance recoloring kicks in:
01992 /// vC does as if vA was evicted => vC uses R1.
01993 /// vC is marked as fixed.
01994 /// vA needs to find a color.
01995 /// None are available.
01996 /// vA cannot evict vC: vC is a fixed virtual register now.
01997 /// vA does as if vB was evicted => vA uses R2.
01998 /// vB needs to find a color.
01999 /// R3 is available.
02000 /// Recoloring => vC = R1, vA = R2, vB = R3
02001 ///
02002 /// \p Order defines the preferred allocation order for \p VirtReg.
02003 /// \p NewRegs will contain any new virtual registers that have been created
02004 /// (split, spill) during the process and that must be assigned.
02005 /// \p FixedRegisters contains all the virtual registers that cannot be
02006 /// recolored.
02007 /// \p Depth gives the current depth of the last chance recoloring.
02008 /// \return a physical register that can be used for VirtReg or ~0u if none
02009 /// exists.
02010 unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
02011                                            AllocationOrder &Order,
02012                                            SmallVectorImpl<unsigned> &NewVRegs,
02013                                            SmallVirtRegSet &FixedRegisters,
02014                                            unsigned Depth) {
02015   DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
02016   // Ranges must be Done.
02017   assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
02018          "Last chance recoloring should really be last chance");
02019   // Set the max depth to LastChanceRecoloringMaxDepth.
02020   // We may want to reconsider that if we end up with too large a search space
02021   // for targets with hundreds of registers.
02022   // Indeed, in that case we may want to cut the search space earlier.
02023   if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
02024     DEBUG(dbgs() << "Abort because max depth has been reached.\n");
02025     CutOffInfo |= CO_Depth;
02026     return ~0u;
02027   }
02028 
02029   // Set of live intervals that will need to be recolored.
02030   SmallLISet RecoloringCandidates;
02031   // Record the original virtual register to physical register mapping in
02032   // case the recoloring fails.
02033   DenseMap<unsigned, unsigned> VirtRegToPhysReg;
02034   // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
02035   // this recoloring "session".
02036   FixedRegisters.insert(VirtReg.reg);
02037 
02038   Order.rewind();
02039   while (unsigned PhysReg = Order.next()) {
02040     DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
02041                  << PrintReg(PhysReg, TRI) << '\n');
02042     RecoloringCandidates.clear();
02043     VirtRegToPhysReg.clear();
02044 
02045     // It is only possible to recolor virtual register interferences.
02046     if (Matrix->checkInterference(VirtReg, PhysReg) >
02047         LiveRegMatrix::IK_VirtReg) {
02048       DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");
02049 
02050       continue;
02051     }
02052 
02053     // Early give up on this PhysReg if it is obvious we cannot recolor all
02054     // the interferences.
02055     if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
02056                                     FixedRegisters)) {
02057       DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
02058       continue;
02059     }
02060 
02061     // RecoloringCandidates contains all the virtual registers that interfere
02062     // with VirtReg on PhysReg (or one of its aliases).
02063     // Enqueue them for recoloring and perform the actual recoloring.
02064     PQueue RecoloringQueue;
02065     for (SmallLISet::iterator It = RecoloringCandidates.begin(),
02066                               EndIt = RecoloringCandidates.end();
02067          It != EndIt; ++It) {
02068       unsigned ItVirtReg = (*It)->reg;
02069       enqueue(RecoloringQueue, *It);
02070       assert(VRM->hasPhys(ItVirtReg) &&
02071              "Interferences are supposed to be with allocated variables");
02072 
02073       // Record the current allocation.
02074       VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
02075       // Unassign it from the live register matrix.
02076       Matrix->unassign(**It);
02077     }
02078 
02079     // Do as if VirtReg was assigned to PhysReg so that the underlying
02080     // recoloring has the right information about the interferences and
02081     // available colors.
02082     Matrix->assign(VirtReg, PhysReg);
02083 
02084     // Save the current recoloring state.
02085     // If we cannot recolor all the interferences, we will have to start again
02086     // at this point for the next physical register.
02087     SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
02088     if (tryRecoloringCandidates(RecoloringQueue, NewVRegs, FixedRegisters,
02089                                 Depth)) {
02090       // Do not mess up the global assignment process, i.e., VirtReg must
02091       // remain unassigned.
02092       Matrix->unassign(VirtReg);
02093       return PhysReg;
02094     }
02095 
02096     DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
02097                  << PrintReg(PhysReg, TRI) << '\n');
02098 
02099     // The recoloring attempt failed, undo the changes.
02100     FixedRegisters = SaveFixedRegisters;
02101     Matrix->unassign(VirtReg);
02102 
02103     for (SmallLISet::iterator It = RecoloringCandidates.begin(),
02104                               EndIt = RecoloringCandidates.end();
02105          It != EndIt; ++It) {
02106       unsigned ItVirtReg = (*It)->reg;
02107       if (VRM->hasPhys(ItVirtReg))
02108         Matrix->unassign(**It);
02109       Matrix->assign(**It, VirtRegToPhysReg[ItVirtReg]);
02110     }
02111   }
02112 
02113   // Last chance recoloring did not work either; give up.
02114   return ~0u;
02115 }
02116 
02117 /// tryRecoloringCandidates - Try to assign a new color to every register
02118 /// in \p RecoloringQueue.
02119 /// \p NewRegs will contain any new virtual register created during the
02120 /// recoloring process.
02121 /// \p FixedRegisters[in/out] contains all the registers that have been
02122 /// recolored.
02123 /// \return true if all virtual registers in RecoloringQueue were successfully
02124 /// recolored, false otherwise.
02125 bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
02126                                        SmallVectorImpl<unsigned> &NewVRegs,
02127                                        SmallVirtRegSet &FixedRegisters,
02128                                        unsigned Depth) {
02129   while (!RecoloringQueue.empty()) {
02130     LiveInterval *LI = dequeue(RecoloringQueue);
02131     DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
02132     unsigned PhysReg =
02133         selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
02134     if (PhysReg == ~0u || !PhysReg)
02135       return false;
02136     DEBUG(dbgs() << "Recoloring of " << *LI
02137                  << " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');
02138     Matrix->assign(*LI, PhysReg);
02139     FixedRegisters.insert(LI->reg);
02140   }
02141   return true;
02142 }
02143 
02144 //===----------------------------------------------------------------------===//
02145 //                            Main Entry Point
02146 //===----------------------------------------------------------------------===//
02147 
02148 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
02149                                  SmallVectorImpl<unsigned> &NewVRegs) {
02150   CutOffInfo = CO_None;
02151   LLVMContext &Ctx = MF->getFunction()->getContext();
02152   SmallVirtRegSet FixedRegisters;
02153   unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
02154   if (Reg == ~0U && (CutOffInfo != CO_None)) {
02155     uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
02156     if (CutOffEncountered == CO_Depth)
02157       Ctx.emitError("register allocation failed: maximum depth for recoloring "
02158                     "reached. Use -fexhaustive-register-search to skip "
02159                     "cutoffs");
02160     else if (CutOffEncountered == CO_Interf)
02161       Ctx.emitError("register allocation failed: maximum interference for "
02162                     "recoloring reached. Use -fexhaustive-register-search "
02163                     "to skip cutoffs");
02164     else if (CutOffEncountered == (CO_Depth | CO_Interf))
02165       Ctx.emitError("register allocation failed: maximum interference and "
02166                     "depth for recoloring reached. Use "
02167                     "-fexhaustive-register-search to skip cutoffs");
02168   }
02169   return Reg;
02170 }
02171 
02172 /// Using a CSR for the first time has a cost because it causes push|pop
02173 /// to be added to prologue|epilogue. Splitting a cold section of the live
02174 /// range, or spilling the live range along the cold path, can be cheaper
02175 /// than using the CSR for the first time.
02176 /// Returns the physical register if we decide to use the CSR;
02177 /// otherwise returns 0.
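/// For example (illustrative), if all of VirtReg's uses sit on a cold path,
/// a spill or pre-split confined to that path can be cheaper than adding a
/// push/pop pair that executes on every entry to the function.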
02178 unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
02179                                          AllocationOrder &Order,
02180                                          unsigned PhysReg,
02181                                          unsigned &CostPerUseLimit,
02182                                          SmallVectorImpl<unsigned> &NewVRegs) {
02183   if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
02184     // We choose spill over using the CSR for the first time if the spill cost
02185     // is lower than CSRCost.
02186     SA->analyze(&VirtReg);
02187     if (calcSpillCost() >= CSRCost)
02188       return PhysReg;
02189 
02190     // We are going to spill, set CostPerUseLimit to 1 to make sure that
02191     // we will not use a callee-saved register in tryEvict.
02192     CostPerUseLimit = 1;
02193     return 0;
02194   }
02195   if (getStage(VirtReg) < RS_Split) {
02196     // We choose pre-splitting over using the CSR for the first time if
02197     // the cost of splitting is lower than CSRCost.
02198     SA->analyze(&VirtReg);
02199     unsigned NumCands = 0;
02200     BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
02201     unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
02202                                                  NumCands, true /*IgnoreCSR*/);
02203     if (BestCand == NoCand)
02204       // Use the CSR if we can't find a region split below CSRCost.
02205       return PhysReg;
02206 
02207     // Perform the actual pre-splitting.
02208     doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
02209     return 0;
02210   }
02211   return PhysReg;
02212 }
02213 
02214 void RAGreedy::initializeCSRCost() {
02215   // We use the larger of the command-line option and the value reported
02216   // by TRI.
02217   CSRCost = BlockFrequency(
02218       std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
02219   if (!CSRCost.getFrequency())
02220     return;
02221 
02222   // Raw cost is relative to Entry == 2^14; scale it appropriately.
02223   uint64_t ActualEntry = MBFI->getEntryFreq();
02224   if (!ActualEntry) {
02225     CSRCost = 0;
02226     return;
02227   }
02228   uint64_t FixedEntry = 1 << 14;
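  // For example (illustrative values): ActualEntry == 1 << 13 halves CSRCost,
  // and ActualEntry == 1 << 15 doubles it.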
02229   if (ActualEntry < FixedEntry)
02230     CSRCost *= BranchProbability(ActualEntry, FixedEntry);
02231   else if (ActualEntry <= UINT32_MAX)
02232     // Invert the fraction and divide.
02233     CSRCost /= BranchProbability(FixedEntry, ActualEntry);
02234   else
02235     // Can't use BranchProbability in general, since it takes 32-bit numbers.
02236     CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
02237 }
02238 
02239 unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
02240                                      SmallVectorImpl<unsigned> &NewVRegs,
02241                                      SmallVirtRegSet &FixedRegisters,
02242                                      unsigned Depth) {
02243   unsigned CostPerUseLimit = ~0u;
02244   // First try assigning a free register.
02245   AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
02246   if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
02247     // We check other options if we are using a CSR for the first time.
02248     bool CSRFirstUse = false;
02249     if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
02250       if (!MRI->isPhysRegUsed(CSR))
02251         CSRFirstUse = true;
02252 
02253     // When NewVRegs is not empty, we may have made decisions such as evicting
02254     // a virtual register; go with the earlier decisions and use the physical
02255     // register.
02256     if (CSRCost.getFrequency() && CSRFirstUse && NewVRegs.empty()) {
02257       unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
02258                                               CostPerUseLimit, NewVRegs);
02259       if (CSRReg || !NewVRegs.empty())
02260         // Return now if we decide to use a CSR or create new vregs due to
02261         // pre-splitting.
02262         return CSRReg;
02263     } else
02264       return PhysReg;
02265   }
02266 
02267   LiveRangeStage Stage = getStage(VirtReg);
02268   DEBUG(dbgs() << StageName[Stage]
02269                << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
02270 
02271   // Try to evict a less worthy live range, but only for ranges from the primary
02272   // queue. The RS_Split ranges already failed to do this, and they should not
02273   // get a second chance until they have been split.
02274   if (Stage != RS_Split)
02275     if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit))
02276       return PhysReg;
02277 
02278   assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
02279 
02280   // The first time we see a live range, don't try to split or spill.
02281   // Wait until the second time, when all smaller ranges have been allocated.
02282   // This gives a better picture of the interference to split around.
02283   if (Stage < RS_Split) {
02284     setStage(VirtReg, RS_Split);
02285     DEBUG(dbgs() << "wait for second round\n");
02286     NewVRegs.push_back(VirtReg.reg);
02287     return 0;
02288   }
02289 
02290   // If we couldn't allocate a register from spilling, there is probably some
02291   // invalid inline assembly. The base class will report it.
02292   if (Stage >= RS_Done || !VirtReg.isSpillable())
02293     return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
02294                                    Depth);
02295 
02296   // Try splitting VirtReg or interferences.
02297   unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
02298   if (PhysReg || !NewVRegs.empty())
02299     return PhysReg;
02300 
02301   // Finally spill VirtReg itself.
02302   NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
02303   LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
02304   spiller().spill(LRE);
02305   setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
02306 
02307   if (VerifyEnabled)
02308     MF->verify(this, "After spilling");
02309 
02310   // The live virtual register requesting allocation was spilled, so tell
02311   // the caller not to allocate anything during this round.
02312   return 0;
02313 }
02314 
02315 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
02316   DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
02317                << "********** Function: " << mf.getName() << '\n');
02318 
02319   MF = &mf;
02320   const TargetMachine &TM = MF->getTarget();
02321   TRI = TM.getSubtargetImpl()->getRegisterInfo();
02322   TII = TM.getSubtargetImpl()->getInstrInfo();
02323   RCI.runOnMachineFunction(mf);
02324 
02325   EnableLocalReassign = EnableLocalReassignment ||
02326     TM.getSubtargetImpl()->enableRALocalReassignment(TM.getOptLevel());
02327 
02328   if (VerifyEnabled)
02329     MF->verify(this, "Before greedy register allocator");
02330 
02331   RegAllocBase::init(getAnalysis<VirtRegMap>(),
02332                      getAnalysis<LiveIntervals>(),
02333                      getAnalysis<LiveRegMatrix>());
02334   Indexes = &getAnalysis<SlotIndexes>();
02335   MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
02336   DomTree = &getAnalysis<MachineDominatorTree>();
02337   SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
02338   Loops = &getAnalysis<MachineLoopInfo>();
02339   Bundles = &getAnalysis<EdgeBundles>();
02340   SpillPlacer = &getAnalysis<SpillPlacement>();
02341   DebugVars = &getAnalysis<LiveDebugVariables>();
02342 
02343   initializeCSRCost();
02344 
02345   calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);
02346 
02347   DEBUG(LIS->dump());
02348 
02349   SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
02350   SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
02351   ExtraRegInfo.clear();
02352   ExtraRegInfo.resize(MRI->getNumVirtRegs());
02353   NextCascade = 1;
02354   IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
02355   GlobalCand.resize(32);  // This will grow as needed.
02356 
02357   allocatePhysRegs();
02358   releaseMemory();
02359   return true;
02360 }