LLVM API Documentation
//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(LoadsClustered, "Number of loads clustered together");

// This allows a latency-based scheduler to notice high latency instructions
// without a target itinerary. The choice of number here has more to do with
// balancing scheduler heuristics than with the actual machine latency.
static cl::opt<int> HighLatencyCycles(
  "sched-high-latency-cycles", cl::Hidden, cl::init(10),
  cl::desc("Roughly estimate the number of cycles that 'long latency' "
           "instructions take for targets with no itinerary"));

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
    : ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
      InstrItins(mf.getSubtarget().getInstrItineraryData()) {}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
  BB = bb;
  DAG = dag;

  // Clear the scheduler's SUnit DAG.
  ScheduleDAG::clearDAG();
  Sequence.clear();

  // Invoke the target's selection of scheduler.
  Schedule();
}

/// NewSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = nullptr;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == nullptr || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (!N ||
      (N->isMachineOpcode() &&
       N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = newSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isVRegCycle = Old->isVRegCycle;
  SU->isCall = Old->isCall;
  SU->isCallOp = Old->isCallOp;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->isScheduleHigh = Old->isScheduleHigh;
  SU->isScheduleLow = Old->isScheduleLow;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

// Helper for AddGlue to clone node operands.
static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG,
                                SmallVectorImpl<EVT> &VTs,
                                SDValue ExtraOper = SDValue()) {
  SmallVector<SDValue, 8> Ops;
  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
    Ops.push_back(N->getOperand(I));

  if (ExtraOper.getNode())
    Ops.push_back(ExtraOper);

  SDVTList VTList = DAG->getVTList(VTs);
  MachineSDNode::mmo_iterator Begin = nullptr, End = nullptr;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, Ops);

  // Reset the memory references.
  if (MN)
    MN->setMemRefs(Begin, End);
}

static bool AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  SDNode *GlueDestNode = Glue.getNode();

  // Don't add glue from a node to itself.
  if (GlueDestNode == N) return false;

  // Don't add a glue operand to something that already uses glue.
  if (GlueDestNode &&
      N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
    return false;
  }
  // Don't add glue to something that already has a glue value.
  if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return false;

  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    VTs.push_back(N->getValueType(I));

  if (AddGlue)
    VTs.push_back(MVT::Glue);

  CloneNodeWithValues(N, DAG, VTs, Glue);

  return true;
}

// Cleanup after an unsuccessful AddGlue. Use the standard method of morphing
// the node even though simply shrinking the value list is sufficient.
static void RemoveUnusedGlue(SDNode *N, SelectionDAG *DAG) {
  assert((N->getValueType(N->getNumValues() - 1) == MVT::Glue &&
          !N->hasAnyUseOfValue(N->getNumValues() - 1)) &&
         "expected an unused glue value");

  SmallVector<EVT, 4> VTs;
  for (unsigned I = 0, E = N->getNumValues()-1; I != E; ++I)
    VTs.push_back(N->getValueType(I));

  CloneNodeWithValues(N, DAG, VTs);
}

/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = nullptr;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  // This algorithm requires a reasonably low use count before finding a match
  // to avoid uselessly blowing up compile time in large blocks.
  unsigned UseCount = 0;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E && UseCount < 100; ++I, ++UseCount) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be OK if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
    // Reset UseCount to allow more matches.
    UseCount = 0;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset, NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Glue outputs and inputs. This also ensures
  // they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  SDValue InGlue = SDValue(nullptr, 0);
  if (AddGlue(Lead, InGlue, true, DAG))
    InGlue = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutGlue = I < E - 1;
    SDNode *Load = Loads[I];

    // If AddGlue fails, we could leave an unused glue value. This should not
    // cause any problems.
    if (AddGlue(Load, InGlue, OutGlue, DAG)) {
      if (OutGlue)
        InGlue = SDValue(Load, Load->getNumValues() - 1);

      ++LoadsClustered;
    }
    else if (!OutGlue && InGlue.getNode())
      RemoveUnusedGlue(InGlue.getNode(), DAG);
  }
}

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    if (MCID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}
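
// Editorial note: the following is an illustrative sketch, not part of the
// original file. It reduces the clustering decision made by
// ClusterNeighboringLoads above to plain offsets: sort the offsets, then
// count how many fall within a window of the lowest one. The real code
// delegates the "close enough" test to the target via
// TII->shouldScheduleLoadsNear(); the fixed MaxDistance window here is an
// assumption used only for illustration.
static unsigned countClusterableOffsets(SmallVectorImpl<int64_t> &Offsets,
                                        int64_t MaxDistance) {
  if (Offsets.empty())
    return 0;
  // Sort offsets in increasing order, mirroring ClusterNeighboringLoads.
  std::sort(Offsets.begin(), Offsets.end());
  int64_t BaseOff = Offsets[0];
  unsigned NumClustered = 0;
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    // Stop at the first offset that is too far from the base load; later
    // offsets are even farther because the list is sorted.
    if (Offsets[i] - BaseOff > MaxDistance)
      break;
    ++NumClustered;
  }
  return NumClustered;
}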
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  SmallVector<SUnit*, 8> CallSUnits;
  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = newSUnit(NI);

    // See if anything is glued to this node; if so, add it to the set of
    // glued nodes. Nodes can have at most one glue input and one glue output.
    // Glue is required to be the last operand and result of a node.

    // Scan up to find glued preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any glued succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
      SDValue GlueVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Glue result.
      bool HasGlueUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (GlueVal.isOperandOf(*UI)) {
          HasGlueUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasGlueUse) break;
    }

    if (NodeSUnit->isCall)
      CallSUnits.push_back(NodeSUnit);

    // Schedule zero-latency TokenFactor below any nodes that may increase the
    // schedule height. Otherwise, ancestors of the TokenFactor may appear to
    // have false stalls.
    if (NI->getOpcode() == ISD::TokenFactor)
      NodeSUnit->isScheduleLow = true;

    // If there are glue operands involved, N is now the bottom-most node
    // of the sequence of nodes that are glued together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
    InitNumRegDefsLeft(NodeSUnit);

    // Assign the Latency field of NodeSUnit using target-provided information.
    computeLatency(NodeSUnit);
  }

  // Find all call operands.
  while (!CallSUnits.empty()) {
    SUnit *SU = CallSUnits.pop_back_val();
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->getOpcode() != ISD::CopyToReg)
        continue;
      SDNode *SrcN = SUNode->getOperand(2).getNode();
      if (isPassiveNode(SrcN)) continue;   // Not scheduled.
      SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
      SrcSU->isCallOp = true;
    }
  }
}

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = forceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const MCInstrDesc &MCID = TII->get(Opc);
      for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
        if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (MCID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross-class copy (cost < 0). That
        // means we are only treating "expensive to copy" register dependency
        // as physical register dependency. This may change in the future
        // though.
        if (Cost >= 0 && !StressSched)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        // Special-case TokenFactor chains as zero-latency.
        if (isChain && OpN->getOpcode() == ISD::TokenFactor)
          OpLatency = 0;

        SDep Dep = isChain ? SDep(OpSU, SDep::Barrier)
                           : SDep(OpSU, SDep::Data, PhysReg);
        Dep.setLatency(OpLatency);
        if (!isChain && !UnitLatencies) {
          computeOperandLatency(OpN, N, i, Dep);
          ST.adjustSchedDependency(OpSU, SU, Dep);
        }

        if (!SU->addPred(Dep) && !Dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
          // Multiple register uses are combined in the same SUnit. For example,
          // we could have a set of glued nodes with all their defs consumed by
          // another set of glued nodes.
          // Register pressure tracking sees this as a single use, so to keep
          // pressure balanced we reduce the defs.
          //
          // We can't tell (without more book-keeping) if this results from
          // glued nodes or duplicate operands. As long as we don't reduce
          // NumRegDefsLeft to zero, we handle the common cases well.
          --OpSU->NumRegDefsLeft;
        }
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the SelectionDAG we are given
/// as input. This SUnit graph is similar to the SelectionDAG, but excludes
/// nodes that aren't interesting to scheduling, and represents nodes glued
/// together with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

// Initialize NumNodeDefs for the current Node's opcode.
void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
  // Check for phys reg copy.
  if (!Node)
    return;

  if (!Node->isMachineOpcode()) {
    if (Node->getOpcode() == ISD::CopyFromReg)
      NodeNumDefs = 1;
    else
      NodeNumDefs = 0;
    return;
  }
  unsigned POpc = Node->getMachineOpcode();
  if (POpc == TargetOpcode::IMPLICIT_DEF) {
    // No register need be allocated for this.
    NodeNumDefs = 0;
    return;
  }
  unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
  // Some instructions define regs that are not represented in the selection DAG
  // (e.g. unused flags). See tMOVi8. Make sure we don't access past NumValues.
  NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
  DefIdx = 0;
}

// Construct a RegDefIter for this SUnit and find the first valid value.
ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
                                           const ScheduleDAGSDNodes *SD)
  : SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
  InitNodeNumDefs();
  Advance();
}

// Advance to the next valid value defined by the SUnit.
void ScheduleDAGSDNodes::RegDefIter::Advance() {
  for (;Node;) { // Visit all glued nodes.
    for (;DefIdx < NodeNumDefs; ++DefIdx) {
      if (!Node->hasAnyUseOfValue(DefIdx))
        continue;
      ValueType = Node->getSimpleValueType(DefIdx);
      ++DefIdx;
      return; // Found a normal regdef.
    }
    Node = Node->getGluedNode();
    if (!Node) {
      return; // No values left to visit.
    }
    InitNodeNumDefs();
  }
}

void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
  assert(SU->NumRegDefsLeft == 0 && "expect a new node");
  for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
    assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
    ++SU->NumRegDefsLeft;
  }
}

void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
  SDNode *N = SU->getNode();

  // TokenFactor operands are considered zero latency, and some schedulers
  // (e.g. Top-Down list) may rely on the fact that operand latency is nonzero
  // whenever node latency is nonzero.
  if (N && N->getOpcode() == ISD::TokenFactor) {
    SU->Latency = 0;
    return;
  }

  // Check to see if the scheduler cares about latencies.
  if (forceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  if (!InstrItins || InstrItins->isEmpty()) {
    if (N && N->isMachineOpcode() &&
        TII->isHighLatencyDef(N->getMachineOpcode()))
      SU->Latency = HighLatencyCycles;
    else
      SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes glued together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode())
      SU->Latency += TII->getInstrLatency(InstrItins, N);
}

void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const {
  // Check to see if the scheduler cares about latencies.
  if (forceUnitLatencies())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Use->isMachineOpcode())
    // Adjust the use operand index by the number of defs.
    OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
  int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
  if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
      !BB->succ_empty()) {
    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      // This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so as not to penalize the def.
      // FIXME: need target specific adjustment here?
      Latency = (Latency > 1) ? Latency - 1 : 1;
  }
  if (Latency >= 0)
    dep.setLatency(Latency);
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> GluedNodes;
  for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
    GluedNodes.push_back(N);
  while (!GluedNodes.empty()) {
    dbgs() << " ";
    GluedNodes.back()->dump(DAG);
    dbgs() << "\n";
    GluedNodes.pop_back();
  }
#endif
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGSDNodes::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

#ifndef NDEBUG
/// VerifyScheduledSequence - Verify that all SUnits were scheduled and that
/// their state is consistent with the nodes listed in Sequence.
///
void ScheduleDAGSDNodes::VerifyScheduledSequence(bool isBottomUp) {
  unsigned ScheduledNodes = ScheduleDAG::VerifyScheduledDAG(isBottomUp);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
}
#endif // NDEBUG

/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
static void
ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
                   SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
                   DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
  if (!N->getHasDebugValue())
    return;

  // Opportunistically insert immediate dbg_value uses, i.e. those with a
  // source order number right after N.
  MachineBasicBlock *BB = Emitter.getBlock();
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  ArrayRef<SDDbgValue*> DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (!Order || DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void
ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
                  DenseMap<SDValue, unsigned> &VRBaseMap,
                  SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
                  SmallSet<unsigned, 8> &Seen) {
  unsigned Order = N->getIROrder();
  if (!Order || !Seen.insert(Order)) {
    // Process any valid SDDbgValues even if the node does not have any order
    // assigned.
    ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
    return;
  }

  MachineBasicBlock *BB = Emitter.getBlock();
  if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() ||
      // Fast-isel may have inserted some instructions, in which case the
      // BB->back().isPHI() test will not fire when we want it to.
      std::prev(Emitter.getInsertPos())->isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)nullptr));
    return;
  }

  Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos())));
  ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}

void ScheduleDAGSDNodes::
EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
                MachineBasicBlock::iterator InsertPos) {
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->CopyDstRC) {
      // Copy to physical register.
      DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
      assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
      // Find the destination physical register.
      unsigned Reg = 0;
      for (SUnit::const_succ_iterator II = SU->Succs.begin(),
           EE = SU->Succs.end(); II != EE; ++II) {
        if (II->isCtrl()) continue;  // ignore chain succs
        if (II->getReg()) {
          Reg = II->getReg();
          break;
        }
      }
      BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
        .addReg(VRI->second);
    } else {
      // Copy from physical register.
      assert(I->getReg() && "Unknown physical register!");
      unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
      bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
      BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
        .addReg(I->getReg());
    }
    break;
  }
}

/// EmitSchedule - Emit the machine code in scheduled order. Return the new
/// InsertPos and MachineBasicBlock that contains this insertion
/// point. ScheduleDAGSDNodes holds a BB pointer for convenience, but this does
/// not necessarily refer to the returned BB. The emitter may split blocks.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      TII->insertNoop(*Emitter.getBlock(), InsertPos);
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any glued SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap, InsertPos);
      continue;
    }

    SmallVector<SDNode *, 4> GluedNodes;
    for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
      GluedNodes.push_back(N);
    while (!GluedNodes.empty()) {
      SDNode *N = GluedNodes.back();
      Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      GluedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), less_first());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(Pos, DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    SmallVector<MachineInstr*, 8> DbgMIs;
    while (DI != DE) {
      if (!(*DI)->isInvalidated())
        if (MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap))
          DbgMIs.push_back(DbgMI);
      ++DI;
    }

    MachineBasicBlock *InsertBB = Emitter.getBlock();
    MachineBasicBlock::iterator Pos = InsertBB->getFirstTerminator();
    InsertBB->insert(Pos, DbgMIs.begin(), DbgMIs.end());
  }

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

/// Return the basic block label.
std::string ScheduleDAGSDNodes::getDAGName() const {
  return "sunit-dag." + BB->getFullName();
}
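
// Editorial note: illustrative usage sketch, not part of the original file.
// It shows how a SelectionDAG-based instruction selector might drive a
// ScheduleDAGSDNodes-derived scheduler through the two entry points defined
// in this file, Run() and EmitSchedule(). The helper name and the surrounding
// setup are assumptions for illustration only.
static MachineBasicBlock *
scheduleAndEmitDAG(ScheduleDAGSDNodes *Scheduler, SelectionDAG *CurDAG,
                   MachineBasicBlock *BB,
                   MachineBasicBlock::iterator &InsertPos) {
  // Build the SUnit graph and run the target-selected scheduling algorithm.
  Scheduler->Run(CurDAG, BB);
  // Emit machine instructions in scheduled order. The emitter may split the
  // block, so the caller must continue with the returned block and the
  // updated insertion point rather than the original BB.
  return Scheduler->EmitSchedule(InsertPos);
}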