LLVM API Documentation
//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Support/raw_ostream.h"
#include <deque>

using namespace llvm;
using namespace llvm::bfi_detail;

#define DEBUG_TYPE "block-freq"

ScaledNumber<uint64_t> BlockMass::toScaled() const {
  if (isFull())
    return ScaledNumber<uint64_t>(1, 0);
  return ScaledNumber<uint64_t>(getMass() + 1, -64);
}

void BlockMass::dump() const { print(dbgs()); }

static char getHexDigit(int N) {
  assert(N < 16);
  if (N < 10)
    return '0' + N;
  return 'a' + N - 10;
}
raw_ostream &BlockMass::print(raw_ostream &OS) const {
  for (int Digits = 0; Digits < 16; ++Digits)
    OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
  return OS;
}

namespace {

typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
typedef BlockFrequencyInfoImplBase::Distribution Distribution;
typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64;
typedef BlockFrequencyInfoImplBase::LoopData LoopData;
typedef BlockFrequencyInfoImplBase::Weight Weight;
typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;

/// \brief Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error. No mass is lost. The dithering precision depends on the
/// precision of the product of \a BlockMass and \a BranchProbability.
///
/// The distribution algorithm follows.
///
///  1. Initialize by saving the sum of the weights in \a RemWeight and the
///     mass to distribute in \a RemMass.
///
///  2. For each portion:
///
///      1. Construct a branch probability, P, as the portion's weight divided
///         by the current value of \a RemWeight.
///      2. Calculate the portion's mass as \a RemMass times P.
///      3. Update \a RemWeight and \a RemMass at each portion by subtracting
///         the current portion's weight and mass.
struct DitheringDistributer {
  uint32_t RemWeight;
  BlockMass RemMass;

  DitheringDistributer(Distribution &Dist, const BlockMass &Mass);

  BlockMass takeMass(uint32_t Weight);
};

} // end namespace

DitheringDistributer::DitheringDistributer(Distribution &Dist,
                                           const BlockMass &Mass) {
  Dist.normalize();
  RemWeight = Dist.Total;
  RemMass = Mass;
}

BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
  assert(Weight && "invalid weight");
  assert(Weight <= RemWeight);
  BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);

  // Decrement totals (dither).
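  // Because each portion is computed against the *remaining* weight and mass
  // rather than the original totals, rounding error from earlier portions is
  // carried forward and the final portion absorbs whatever mass is left.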
  RemWeight -= Weight;
  RemMass -= Mass;
  return Mass;
}

void Distribution::add(const BlockNode &Node, uint64_t Amount,
                       Weight::DistType Type) {
  assert(Amount && "invalid weight of 0");
  uint64_t NewTotal = Total + Amount;

  // Check for overflow. It should be impossible to overflow twice.
  bool IsOverflow = NewTotal < Total;
  assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
  DidOverflow |= IsOverflow;

  // Update the total.
  Total = NewTotal;

  // Save the weight.
  Weights.push_back(Weight(Type, Node, Amount));
}

static void combineWeight(Weight &W, const Weight &OtherW) {
  assert(OtherW.TargetNode.isValid());
  if (!W.Amount) {
    W = OtherW;
    return;
  }
  assert(W.Type == OtherW.Type);
  assert(W.TargetNode == OtherW.TargetNode);
  assert(W.Amount < W.Amount + OtherW.Amount && "Unexpected overflow");
  W.Amount += OtherW.Amount;
}
static void combineWeightsBySorting(WeightList &Weights) {
  // Sort so edges to the same node are adjacent.
  std::sort(Weights.begin(), Weights.end(),
            [](const Weight &L,
               const Weight &R) { return L.TargetNode < R.TargetNode; });

  // Combine adjacent edges.
  WeightList::iterator O = Weights.begin();
  for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
       ++O, (I = L)) {
    *O = *I;

    // Find the adjacent weights to the same node.
    for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
      combineWeight(*O, *L);
  }

  // Erase extra entries.
  Weights.erase(O, Weights.end());
  return;
}
static void combineWeightsByHashing(WeightList &Weights) {
  // Collect weights into a DenseMap.
  typedef DenseMap<BlockNode::IndexType, Weight> HashTable;
  HashTable Combined(NextPowerOf2(2 * Weights.size()));
  for (const Weight &W : Weights)
    combineWeight(Combined[W.TargetNode.Index], W);

  // Check whether anything changed.
  if (Weights.size() == Combined.size())
    return;

  // Fill in the new weights.
  Weights.clear();
  Weights.reserve(Combined.size());
  for (const auto &I : Combined)
    Weights.push_back(I.second);
}
static void combineWeights(WeightList &Weights) {
  // Use a hash table for many successors to keep this linear.
  if (Weights.size() > 128) {
    combineWeightsByHashing(Weights);
    return;
  }

  combineWeightsBySorting(Weights);
}
static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
  assert(Shift >= 0);
  assert(Shift < 64);
  if (!Shift)
    return N;
  return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
}
void Distribution::normalize() {
  // Early exit for termination nodes.
  if (Weights.empty())
    return;

  // Only bother if there are multiple successors.
  if (Weights.size() > 1)
    combineWeights(Weights);

  // Early exit when combined into a single successor.
  if (Weights.size() == 1) {
    Total = 1;
    Weights.front().Amount = 1;
    return;
  }

  // Determine how much to shift right so that the total fits into 32-bits.
  //
  // If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1
  // for each weight can cause a 32-bit overflow.
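  //
  // For example, a (non-overflowed) Total of 2^40 has 23 leading zeros, so
  // Shift becomes 33 - 23 = 10 and the total scales down to roughly 2^30,
  // leaving headroom below UINT32_MAX even after each weight is rounded up
  // to at least 1.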
  int Shift = 0;
  if (DidOverflow)
    Shift = 33;
  else if (Total > UINT32_MAX)
    Shift = 33 - countLeadingZeros(Total);

  // Early exit if nothing needs to be scaled.
  if (!Shift)
    return;

  // Recompute the total through accumulation (rather than shifting it) so that
  // it's accurate after shifting.
  Total = 0;

  // Sum the weights to each node and shift right if necessary.
  for (Weight &W : Weights) {
    // Scale down below UINT32_MAX. Since Shift is larger than necessary, we
    // can round here without concern about overflow.
    assert(W.TargetNode.isValid());
    W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
    assert(W.Amount <= UINT32_MAX);

    // Update the total.
    Total += W.Amount;
  }
  assert(Total <= UINT32_MAX);
}

void BlockFrequencyInfoImplBase::clear() {
  // Swap with a default-constructed std::vector, since std::vector<>::clear()
  // does not actually clear heap storage.
  std::vector<FrequencyData>().swap(Freqs);
  std::vector<WorkingData>().swap(Working);
  Loops.clear();
}

/// \brief Clear all memory not needed downstream.
///
/// Releases all memory not used downstream. In particular, saves Freqs.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
  std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
  BFI.clear();
  BFI.Freqs = std::move(SavedFreqs);
}

bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
                                           const LoopData *OuterLoop,
                                           const BlockNode &Pred,
                                           const BlockNode &Succ,
                                           uint64_t Weight) {
  if (!Weight)
    Weight = 1;

  auto isLoopHeader = [&OuterLoop](const BlockNode &Node) {
    return OuterLoop && OuterLoop->isHeader(Node);
  };

  BlockNode Resolved = Working[Succ.Index].getResolvedNode();

#ifndef NDEBUG
  auto debugSuccessor = [&](const char *Type) {
    dbgs() << " =>"
           << " [" << Type << "] weight = " << Weight;
    if (!isLoopHeader(Resolved))
      dbgs() << ", succ = " << getBlockName(Succ);
    if (Resolved != Succ)
      dbgs() << ", resolved = " << getBlockName(Resolved);
    dbgs() << "\n";
  };
  (void)debugSuccessor;
#endif

  if (isLoopHeader(Resolved)) {
    DEBUG(debugSuccessor("backedge"));
    Dist.addBackedge(OuterLoop->getHeader(), Weight);
    return true;
  }

  if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
    DEBUG(debugSuccessor(" exit "));
    Dist.addExit(Resolved, Weight);
    return true;
  }

  if (Resolved < Pred) {
    if (!isLoopHeader(Pred)) {
      // If OuterLoop is an irreducible loop, we can't actually handle this.
      assert((!OuterLoop || !OuterLoop->isIrreducible()) &&
             "unhandled irreducible control flow");

      // Irreducible backedge. Abort.
      DEBUG(debugSuccessor("abort!!!"));
      return false;
    }

    // If "Pred" is a loop header, then this isn't really a backedge; rather,
    // OuterLoop must be irreducible. These false backedges can come only from
    // secondary loop headers.
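    // In that case the edge is treated as a local edge below; the assert
    // documents that an irreducible OuterLoop is the only way to reach here.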
    assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) &&
           "unhandled irreducible control flow");
  }

  DEBUG(debugSuccessor(" local "));
  Dist.addLocal(Resolved, Weight);
  return true;
}

bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
    const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) {
  // Copy the exit map into Dist.
  for (const auto &I : Loop.Exits)
    if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first,
                   I.second.getMass()))
      // Irreducible backedge.
      return false;

  return true;
}

/// \brief Get the maximum allowed loop scale.
///
/// Gives the maximum number of estimated iterations allowed for a loop. Very
/// large numbers cause problems downstream (even within 64-bits).
static Scaled64 getMaxLoopScale() { return Scaled64(1, 12); }

/// \brief Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
  // Compute loop scale.
  DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");

  // LoopScale == 1 / ExitMass
  // ExitMass == HeadMass - BackedgeMass
  BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;

  // Block scale stores the inverse of the scale.
  Loop.Scale = ExitMass.toScaled().inverse();

  DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
               << " - " << Loop.BackedgeMass << ")\n"
               << " - scale = " << Loop.Scale << "\n");

  if (Loop.Scale > getMaxLoopScale()) {
    Loop.Scale = getMaxLoopScale();
    DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n");
  }
}

/// \brief Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
  DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");

  // Clear the subloop exits to prevent quadratic memory usage.
  for (const BlockNode &M : Loop.Nodes) {
    if (auto *Loop = Working[M.Index].getPackagedLoop())
      Loop->Exits.clear();
    DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
  }
  Loop.IsPackaged = true;
}

void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
                                                LoopData *OuterLoop,
                                                Distribution &Dist) {
  BlockMass Mass = Working[Source.Index].getMass();
  DEBUG(dbgs() << " => mass: " << Mass << "\n");

  // Distribute mass to successors as laid out in Dist.
  DitheringDistributer D(Dist, Mass);

#ifndef NDEBUG
  auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
                         const char *Desc) {
    dbgs() << " => assign " << M << " (" << D.RemMass << ")";
    if (Desc)
      dbgs() << " [" << Desc << "]";
    if (T.isValid())
      dbgs() << " to " << getBlockName(T);
    dbgs() << "\n";
  };
  (void)debugAssign;
#endif

  for (const Weight &W : Dist.Weights) {
    // Check for a local edge (non-backedge and non-exit).
    BlockMass Taken = D.takeMass(W.Amount);
    if (W.Type == Weight::Local) {
      Working[W.TargetNode.Index].getMass() += Taken;
      DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
      continue;
    }

    // Backedges and exits only make sense if we're processing a loop.
    assert(OuterLoop && "backedge or exit outside of loop");

    // Check for a backedge.
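    // Backedge mass accumulates on the loop; computeLoopScale() later inverts
    // the remaining exit mass to estimate the loop's iteration count.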
    if (W.Type == Weight::Backedge) {
      OuterLoop->BackedgeMass += Taken;
      DEBUG(debugAssign(BlockNode(), Taken, "back"));
      continue;
    }

    // This must be an exit.
    assert(W.Type == Weight::Exit);
    OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
    DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
  }
}

static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
                                     const Scaled64 &Min, const Scaled64 &Max) {
  // Scale the Factor to a size that creates integers. Ideally, integers would
  // be scaled so that Max == UINT64_MAX so that they can be best
  // differentiated. However, the register allocator currently deals poorly
  // with large numbers. Instead, push Min up a little from 1 to give some
  // room to differentiate small, unequal numbers.
  //
  // TODO: fix issues downstream so that ScalingFactor can be
  // Scaled64(1,64)/Max.
  Scaled64 ScalingFactor = Min.inverse();
  if ((Max / Min).lg() < 60)
    ScalingFactor <<= 3;

  // Translate the floats to integers.
  DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
               << ", factor = " << ScalingFactor << "\n");
  for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
    Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
    BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
    DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
                 << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
                 << ", int = " << BFI.Freqs[Index].Integer << "\n");
  }
}

/// \brief Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
  DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
               << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
               << "\n");
  Loop.Scale *= Loop.Mass.toScaled();
  Loop.IsPackaged = false;
  DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n");

  // Propagate the head scale through the loop. Since members are visited in
  // RPO, the head scale will be updated by the loop scale first, and then the
  // final head scale will be used to update the rest of the members.
  for (const BlockNode &N : Loop.Nodes) {
    const auto &Working = BFI.Working[N.Index];
    Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
                                       : BFI.Freqs[N.Index].Scaled;
    Scaled64 New = Loop.Scale * F;
    DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
                 << "\n");
    F = New;
  }
}

void BlockFrequencyInfoImplBase::unwrapLoops() {
  // Set initial frequencies from loop-local masses.
  for (size_t Index = 0; Index < Working.size(); ++Index)
    Freqs[Index].Scaled = Working[Index].Mass.toScaled();

  for (LoopData &Loop : Loops)
    unwrapLoop(*this, Loop);
}

void BlockFrequencyInfoImplBase::finalizeMetrics() {
  // Unwrap loop packages in reverse post-order, tracking min and max
  // frequencies.
  auto Min = Scaled64::getLargest();
  auto Max = Scaled64::getZero();
  for (size_t Index = 0; Index < Working.size(); ++Index) {
    // Update min/max scale.
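    // convertFloatingToInteger() below uses these bounds to pick a scaling
    // factor that maps the smallest frequency to a small non-zero integer.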
    Min = std::min(Min, Freqs[Index].Scaled);
    Max = std::max(Max, Freqs[Index].Scaled);
  }

  // Convert to integers.
  convertFloatingToInteger(*this, Min, Max);

  // Clean up data structures.
  cleanup(*this);

  // Print out the final stats.
  DEBUG(dump());
}

BlockFrequency
BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return 0;
  return Freqs[Node.Index].Integer;
}
Scaled64
BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return Scaled64::getZero();
  return Freqs[Node.Index].Scaled;
}

std::string
BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
  return std::string();
}
std::string
BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const {
  return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? "**" : "*");
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockNode &Node) const {
  return OS << getFloatingBlockFreq(Node);
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockFrequency &Freq) const {
  Scaled64 Block(Freq.getFrequency(), 0);
  Scaled64 Entry(getEntryFreq(), 0);

  return OS << Block / Entry;
}

void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) {
  Start = OuterLoop.getHeader();
  Nodes.reserve(OuterLoop.Nodes.size());
  for (auto N : OuterLoop.Nodes)
    addNode(N);
  indexNodes();
}
void IrreducibleGraph::addNodesInFunction() {
  Start = 0;
  for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
    if (!BFI.Working[Index].isPackaged())
      addNode(Index);
  indexNodes();
}
void IrreducibleGraph::indexNodes() {
  for (auto &I : Nodes)
    Lookup[I.Node.Index] = &I;
}
void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ,
                               const BFIBase::LoopData *OuterLoop) {
  if (OuterLoop && OuterLoop->isHeader(Succ))
    return;
  auto L = Lookup.find(Succ.Index);
  if (L == Lookup.end())
    return;
  IrrNode &SuccIrr = *L->second;
  Irr.Edges.push_back(&SuccIrr);
  SuccIrr.Edges.push_front(&Irr);
  ++SuccIrr.NumIn;
}

namespace llvm {
template <> struct GraphTraits<IrreducibleGraph> {
  typedef bfi_detail::IrreducibleGraph GraphT;

  typedef const GraphT::IrrNode NodeType;
  typedef GraphT::IrrNode::iterator ChildIteratorType;

  static const NodeType *getEntryNode(const GraphT &G) {
    return G.StartIrr;
  }
  static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
  static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
};
}

/// \brief Find extra irreducible headers.
///
/// Find entry blocks and other blocks with backedges, which exist when \c G
/// contains irreducible sub-SCCs.
static void findIrreducibleHeaders(
    const BlockFrequencyInfoImplBase &BFI,
    const IrreducibleGraph &G,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC,
    LoopData::NodeList &Headers, LoopData::NodeList &Others) {
  // Map from nodes in the SCC to whether it's an entry block.
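  // An entry block is one with a predecessor outside the SCC; every entry
  // block becomes a header of the synthetic irreducible loop.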
  SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC;

  // InSCC also acts as the set of nodes in the graph. Seed it.
  for (const auto *I : SCC)
    InSCC[I] = false;

  for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) {
    auto &Irr = *I->first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      if (InSCC.count(P))
        continue;

      // This is an entry block.
      I->second = true;
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
  }
  assert(Headers.size() >= 2 && "Should be irreducible");
  if (Headers.size() == InSCC.size()) {
    // Every block is a header.
    std::sort(Headers.begin(), Headers.end());
    return;
  }

  // Look for extra headers from irreducible sub-SCCs.
  for (const auto &I : InSCC) {
    // Entry blocks are already headers.
    if (I.second)
      continue;

    auto &Irr = *I.first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      // Skip forward edges.
      if (P->Node < Irr.Node)
        continue;

      // Skip predecessors from entry blocks. These can have inverted
      // ordering.
      if (InSCC.lookup(P))
        continue;

      // Store the extra header.
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
    if (Headers.back() == Irr.Node)
      // Added this as a header.
      continue;

    // This is not a header.
    Others.push_back(Irr.Node);
    DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
  }
  std::sort(Headers.begin(), Headers.end());
  std::sort(Others.begin(), Others.end());
}

static void createIrreducibleLoop(
    BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G,
    LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
  // Translate the SCC into RPO.
  DEBUG(dbgs() << " - found-scc\n");

  LoopData::NodeList Headers;
  LoopData::NodeList Others;
  findIrreducibleHeaders(BFI, G, SCC, Headers, Others);

  auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(),
                                Headers.end(), Others.begin(), Others.end());

  // Update loop hierarchy.
  for (const auto &N : Loop->Nodes)
    if (BFI.Working[N.Index].isLoopHeader())
      BFI.Working[N.Index].Loop->Parent = &*Loop;
    else
      BFI.Working[N.Index].Loop = &*Loop;
}

iterator_range<std::list<LoopData>::iterator>
BlockFrequencyInfoImplBase::analyzeIrreducible(
    const IrreducibleGraph &G, LoopData *OuterLoop,
    std::list<LoopData>::iterator Insert) {
  assert((OuterLoop == nullptr) == (Insert == Loops.begin()));
  auto Prev = OuterLoop ? std::prev(Insert) : Loops.end();

  for (auto I = scc_begin(G); !I.isAtEnd(); ++I) {
    if (I->size() < 2)
      continue;

    // Translate the SCC into RPO.
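    // Single-node SCCs were skipped above, since a lone block cannot form an
    // irreducible region by itself.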
    createIrreducibleLoop(*this, G, OuterLoop, Insert, *I);
  }

  if (OuterLoop)
    return make_range(std::next(Prev), Insert);
  return make_range(Loops.begin(), Insert);
}

void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
  OuterLoop.Exits.clear();
  OuterLoop.BackedgeMass = BlockMass::getEmpty();
  auto O = OuterLoop.Nodes.begin() + 1;
  for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
    if (!Working[I->Index].isPackaged())
      *O++ = *I;
  OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}