
SIInstrInfo.cpp
00001 //===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 /// \file
00011 /// \brief SI Implementation of TargetInstrInfo.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 
00016 #include "SIInstrInfo.h"
00017 #include "AMDGPUTargetMachine.h"
00018 #include "SIDefines.h"
00019 #include "SIMachineFunctionInfo.h"
00020 #include "llvm/CodeGen/MachineFrameInfo.h"
00021 #include "llvm/CodeGen/MachineInstrBuilder.h"
00022 #include "llvm/CodeGen/MachineRegisterInfo.h"
00023 #include "llvm/IR/Function.h"
00024 #include "llvm/MC/MCInstrDesc.h"
00025 
00026 using namespace llvm;
00027 
00028 SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
00029   : AMDGPUInstrInfo(st),
00030     RI(st) { }
00031 
00032 //===----------------------------------------------------------------------===//
00033 // TargetInstrInfo callbacks
00034 //===----------------------------------------------------------------------===//
00035 
00036 static unsigned getNumOperandsNoGlue(SDNode *Node) {
00037   unsigned N = Node->getNumOperands();
00038   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
00039     --N;
00040   return N;
00041 }
00042 
00043 static SDValue findChainOperand(SDNode *Load) {
00044   SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
00045   assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
00046   return LastOp;
00047 }
00048 
00049 /// \brief Returns true if both nodes have the same value for the given
00050 ///        operand \p OpName, or if both nodes do not have this operand.
00051 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
00052   unsigned Opc0 = N0->getMachineOpcode();
00053   unsigned Opc1 = N1->getMachineOpcode();
00054 
00055   int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
00056   int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
00057 
00058   if (Op0Idx == -1 && Op1Idx == -1)
00059     return true;
00060 
00061 
00062   if ((Op0Idx == -1 && Op1Idx != -1) ||
00063       (Op1Idx == -1 && Op0Idx != -1))
00064     return false;
00065 
00066   // getNamedOperandIdx returns the index for the MachineInstr's operands,
00067   // which includes the result as the first operand. We are indexing into the
00068   // MachineSDNode's operands, so we need to skip the result operand to get
00069   // the real index.
00070   --Op0Idx;
00071   --Op1Idx;
00072 
00073   return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
00074 }
00075 
00076 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
00077                                           int64_t &Offset0,
00078                                           int64_t &Offset1) const {
00079   if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
00080     return false;
00081 
00082   unsigned Opc0 = Load0->getMachineOpcode();
00083   unsigned Opc1 = Load1->getMachineOpcode();
00084 
00085   // Make sure both are actually loads.
00086   if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
00087     return false;
00088 
00089   if (isDS(Opc0) && isDS(Opc1)) {
00090     assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
00091 
00092     // Check base reg.
00093     if (Load0->getOperand(1) != Load1->getOperand(1))
00094       return false;
00095 
00096     // Check chain.
00097     if (findChainOperand(Load0) != findChainOperand(Load1))
00098       return false;
00099 
00100     // Skip read2 / write2 variants for simplicity.
00101     // TODO: We should report true if the used offsets are adjacent (excluding
00102     // st64 versions).
00103     if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
00104         AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
00105       return false;
00106 
00107     Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
00108     Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
00109     return true;
00110   }
00111 
00112   if (isSMRD(Opc0) && isSMRD(Opc1)) {
00113     assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
00114 
00115     // Check base reg.
00116     if (Load0->getOperand(0) != Load1->getOperand(0))
00117       return false;
00118 
00119     // Check chain.
00120     if (findChainOperand(Load0) != findChainOperand(Load1))
00121       return false;
00122 
00123     Offset0 = cast<ConstantSDNode>(Load0->getOperand(1))->getZExtValue();
00124     Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue();
00125     return true;
00126   }
00127 
00128   // MUBUF and MTBUF can access the same addresses.
00129   if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
00130 
00131     // MUBUF and MTBUF have vaddr at different indices.
00132     if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
00133         findChainOperand(Load0) != findChainOperand(Load1) ||
00134         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
00135         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
00136       return false;
00137 
00138     int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
00139     int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
00140 
00141     if (OffIdx0 == -1 || OffIdx1 == -1)
00142       return false;
00143 
00144     // getNamedOperandIdx returns the index for MachineInstrs.  Since they
00145     // include the output in the operand list, but SDNodes don't, we need to
00146     // subtract one from the index.
00147     --OffIdx0;
00148     --OffIdx1;
00149 
00150     SDValue Off0 = Load0->getOperand(OffIdx0);
00151     SDValue Off1 = Load1->getOperand(OffIdx1);
00152 
00153     // The offset might be a FrameIndexSDNode.
00154     if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
00155       return false;
00156 
00157     Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
00158     Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
00159     return true;
00160   }
00161 
00162   return false;
00163 }
00164 
00165 static bool isStride64(unsigned Opc) {
00166   switch (Opc) {
00167   case AMDGPU::DS_READ2ST64_B32:
00168   case AMDGPU::DS_READ2ST64_B64:
00169   case AMDGPU::DS_WRITE2ST64_B32:
00170   case AMDGPU::DS_WRITE2ST64_B64:
00171     return true;
00172   default:
00173     return false;
00174   }
00175 }
00176 
00177 bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
00178                                        unsigned &BaseReg, unsigned &Offset,
00179                                        const TargetRegisterInfo *TRI) const {
00180   unsigned Opc = LdSt->getOpcode();
00181   if (isDS(Opc)) {
00182     const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
00183                                                       AMDGPU::OpName::offset);
00184     if (OffsetImm) {
00185       // Normal, single offset LDS instruction.
00186       const MachineOperand *AddrReg = getNamedOperand(*LdSt,
00187                                                       AMDGPU::OpName::addr);
00188 
00189       BaseReg = AddrReg->getReg();
00190       Offset = OffsetImm->getImm();
00191       return true;
00192     }
00193 
00194     // The 2 offset instructions use offset0 and offset1 instead. We can treat
00195     // these as a load with a single offset if the 2 offsets are consecutive. We
00196     // will use this for some partially aligned loads.
00197     const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
00198                                                        AMDGPU::OpName::offset0);
00199     const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
00200                                                        AMDGPU::OpName::offset1);
00201 
00202     uint8_t Offset0 = Offset0Imm->getImm();
00203     uint8_t Offset1 = Offset1Imm->getImm();
00204     assert(Offset1 > Offset0);
00205 
00206     if (Offset1 - Offset0 == 1) {
00207       // Each of these offsets is in element-sized units, so we need to convert
00208       // to bytes for the individual reads.
00209 
00210       unsigned EltSize;
00211       if (LdSt->mayLoad())
00212         EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
00213       else {
00214         assert(LdSt->mayStore());
00215         int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
00216         EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
00217       }
00218 
00219       if (isStride64(Opc))
00220         EltSize *= 64;
00221 
00222       const MachineOperand *AddrReg = getNamedOperand(*LdSt,
00223                                                       AMDGPU::OpName::addr);
00224       BaseReg = AddrReg->getReg();
00225       Offset = EltSize * Offset0;
00226       return true;
00227     }
00228 
00229     return false;
00230   }
00231 
00232   if (isMUBUF(Opc) || isMTBUF(Opc)) {
00233     if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
00234       return false;
00235 
00236     const MachineOperand *AddrReg = getNamedOperand(*LdSt,
00237                                                     AMDGPU::OpName::vaddr);
00238     if (!AddrReg)
00239       return false;
00240 
00241     const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
00242                                                       AMDGPU::OpName::offset);
00243     BaseReg = AddrReg->getReg();
00244     Offset = OffsetImm->getImm();
00245     return true;
00246   }
00247 
00248   if (isSMRD(Opc)) {
00249     const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
00250                                                       AMDGPU::OpName::offset);
00251     if (!OffsetImm)
00252       return false;
00253 
00254     const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
00255                                                      AMDGPU::OpName::sbase);
00256     BaseReg = SBaseReg->getReg();
00257     Offset = OffsetImm->getImm();
00258     return true;
00259   }
00260 
00261   return false;
00262 }
00263 
00264 bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
00265                                      MachineInstr *SecondLdSt,
00266                                      unsigned NumLoads) const {
00267   unsigned Opc0 = FirstLdSt->getOpcode();
00268   unsigned Opc1 = SecondLdSt->getOpcode();
00269 
00270   // TODO: This needs finer tuning
00271   if (NumLoads > 4)
00272     return false;
00273 
00274   if (isDS(Opc0) && isDS(Opc1))
00275     return true;
00276 
00277   if (isSMRD(Opc0) && isSMRD(Opc1))
00278     return true;
00279 
00280   if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1)))
00281     return true;
00282 
00283   return false;
00284 }
00285 
00286 void
00287 SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
00288                          MachineBasicBlock::iterator MI, DebugLoc DL,
00289                          unsigned DestReg, unsigned SrcReg,
00290                          bool KillSrc) const {
00291 
00292   // If we are trying to copy to or from SCC, there is a bug somewhere else in
00293   // the backend.  While it may be theoretically possible to do this, it should
00294   // never be necessary.
00295   assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
00296 
00297   static const int16_t Sub0_15[] = {
00298     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
00299     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
00300     AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
00301     AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
00302   };
00303 
00304   static const int16_t Sub0_7[] = {
00305     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
00306     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
00307   };
00308 
00309   static const int16_t Sub0_3[] = {
00310     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
00311   };
00312 
00313   static const int16_t Sub0_2[] = {
00314     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
00315   };
00316 
00317   static const int16_t Sub0_1[] = {
00318     AMDGPU::sub0, AMDGPU::sub1, 0
00319   };
00320 
00321   unsigned Opcode;
00322   const int16_t *SubIndices;
00323 
00324   if (AMDGPU::M0 == DestReg) {
00325     // Check if M0 is already set to this value.
00326     for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
00327       I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {
00328 
00329       if (!I->definesRegister(AMDGPU::M0))
00330         continue;
00331 
00332       unsigned Opc = I->getOpcode();
00333       if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
00334         break;
00335 
00336       if (!I->readsRegister(SrcReg))
00337         break;
00338 
00339       // The copy isn't necessary
00340       return;
00341     }
00342   }
00343 
00344   if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
00345     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
00346     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
00347             .addReg(SrcReg, getKillRegState(KillSrc));
00348     return;
00349 
00350   } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
00351     assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
00352     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
00353             .addReg(SrcReg, getKillRegState(KillSrc));
00354     return;
00355 
00356   } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
00357     assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
00358     Opcode = AMDGPU::S_MOV_B32;
00359     SubIndices = Sub0_3;
00360 
00361   } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
00362     assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
00363     Opcode = AMDGPU::S_MOV_B32;
00364     SubIndices = Sub0_7;
00365 
00366   } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
00367     assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
00368     Opcode = AMDGPU::S_MOV_B32;
00369     SubIndices = Sub0_15;
00370 
00371   } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
00372     assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
00373            AMDGPU::SReg_32RegClass.contains(SrcReg));
00374     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
00375             .addReg(SrcReg, getKillRegState(KillSrc));
00376     return;
00377 
00378   } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
00379     assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
00380            AMDGPU::SReg_64RegClass.contains(SrcReg));
00381     Opcode = AMDGPU::V_MOV_B32_e32;
00382     SubIndices = Sub0_1;
00383 
00384   } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
00385     assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
00386     Opcode = AMDGPU::V_MOV_B32_e32;
00387     SubIndices = Sub0_2;
00388 
00389   } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
00390     assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
00391            AMDGPU::SReg_128RegClass.contains(SrcReg));
00392     Opcode = AMDGPU::V_MOV_B32_e32;
00393     SubIndices = Sub0_3;
00394 
00395   } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
00396     assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
00397            AMDGPU::SReg_256RegClass.contains(SrcReg));
00398     Opcode = AMDGPU::V_MOV_B32_e32;
00399     SubIndices = Sub0_7;
00400 
00401   } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
00402     assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
00403            AMDGPU::SReg_512RegClass.contains(SrcReg));
00404     Opcode = AMDGPU::V_MOV_B32_e32;
00405     SubIndices = Sub0_15;
00406 
00407   } else {
00408     llvm_unreachable("Can't copy register!");
00409   }
00410 
00411   while (unsigned SubIdx = *SubIndices++) {
00412     MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
00413       get(Opcode), RI.getSubReg(DestReg, SubIdx));
00414 
00415     Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
00416 
00417     if (*SubIndices)
00418       Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
00419   }
00420 }
00421 
00422 unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
00423   int NewOpc;
00424 
00425   // Try to map original to commuted opcode
00426   if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
00427     return NewOpc;
00428 
00429   // Try to map commuted to original opcode
00430   if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
00431     return NewOpc;
00432 
00433   return Opcode;
00434 }
00435 
00436 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
00437                                       MachineBasicBlock::iterator MI,
00438                                       unsigned SrcReg, bool isKill,
00439                                       int FrameIndex,
00440                                       const TargetRegisterClass *RC,
00441                                       const TargetRegisterInfo *TRI) const {
00442   MachineFunction *MF = MBB.getParent();
00443   MachineFrameInfo *FrameInfo = MF->getFrameInfo();
00444   DebugLoc DL = MBB.findDebugLoc(MI);
00445 
00446   if (RI.hasVGPRs(RC)) {
00447     LLVMContext &Ctx = MF->getFunction()->getContext();
00448     Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Can't spill VGPR!");
00449     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0)
00450             .addReg(SrcReg);
00451   } else if (RI.isSGPRClass(RC)) {
00452     // We are only allowed to create one new instruction when spilling
00453     // registers, so we need to use a pseudo instruction for spilling
00454     // SGPRs.
00455     unsigned Opcode;
00456     switch (RC->getSize() * 8) {
00457     case 32:  Opcode = AMDGPU::SI_SPILL_S32_SAVE;  break;
00458     case 64:  Opcode = AMDGPU::SI_SPILL_S64_SAVE;  break;
00459     case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
00460     case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
00461     case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
00462     default: llvm_unreachable("Cannot spill register class");
00463     }
00464 
00465     FrameInfo->setObjectAlignment(FrameIndex, 4);
00466     BuildMI(MBB, MI, DL, get(Opcode))
00467             .addReg(SrcReg)
00468             .addFrameIndex(FrameIndex);
00469   } else {
00470     llvm_unreachable("VGPR spilling not supported");
00471   }
00472 }
00473 
00474 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
00475                                        MachineBasicBlock::iterator MI,
00476                                        unsigned DestReg, int FrameIndex,
00477                                        const TargetRegisterClass *RC,
00478                                        const TargetRegisterInfo *TRI) const {
00479   MachineFunction *MF = MBB.getParent();
00480   MachineFrameInfo *FrameInfo = MF->getFrameInfo();
00481   DebugLoc DL = MBB.findDebugLoc(MI);
00482 
00483   if (RI.hasVGPRs(RC)) {
00484     LLVMContext &Ctx = MF->getFunction()->getContext();
00485     Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Can't retrieve spilled VGPR!");
00486     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
00487             .addImm(0);
00488   } else if (RI.isSGPRClass(RC)){
00489     unsigned Opcode;
00490     switch(RC->getSize() * 8) {
00491     case 32:  Opcode = AMDGPU::SI_SPILL_S32_RESTORE; break;
00492     case 64:  Opcode = AMDGPU::SI_SPILL_S64_RESTORE;  break;
00493     case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
00494     case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
00495     case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
00496     default: llvm_unreachable("Cannot spill register class");
00497     }
00498 
00499     FrameInfo->setObjectAlignment(FrameIndex, 4);
00500     BuildMI(MBB, MI, DL, get(Opcode), DestReg)
00501             .addFrameIndex(FrameIndex);
00502   } else {
00503     llvm_unreachable("VGPR spilling not supported");
00504   }
00505 }
00506 
00507 void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
00508                              int Count) const {
00509   while (Count > 0) {
00510     int Arg;
00511     if (Count >= 8)
00512       Arg = 7;
00513     else
00514       Arg = Count - 1;
00515     Count -= 8;
00516     BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
00517             .addImm(Arg);
00518   }
00519 }
00520 
00521 bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
00522   MachineBasicBlock &MBB = *MI->getParent();
00523   DebugLoc DL = MBB.findDebugLoc(MI);
00524   switch (MI->getOpcode()) {
00525   default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
00526 
00527   case AMDGPU::SI_CONSTDATA_PTR: {
00528     unsigned Reg = MI->getOperand(0).getReg();
00529     unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
00530     unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
00531 
00532     BuildMI(MBB, MI, DL, get(AMDGPU::S_GETPC_B64), Reg);
00533 
00534     // Add 32-bit offset from this instruction to the start of the constant data.
00535     BuildMI(MBB, MI, DL, get(AMDGPU::S_ADD_U32), RegLo)
00536             .addReg(RegLo)
00537             .addTargetIndex(AMDGPU::TI_CONSTDATA_START)
00538             .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit);
00539     BuildMI(MBB, MI, DL, get(AMDGPU::S_ADDC_U32), RegHi)
00540             .addReg(RegHi)
00541             .addImm(0)
00542             .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit)
00543             .addReg(AMDGPU::SCC, RegState::Implicit);
00544     MI->eraseFromParent();
00545     break;
00546   }
00547   }
00548   return true;
00549 }
00550 
00551 MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
00552                                               bool NewMI) const {
00553 
00554   if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
00555     return nullptr;
00556 
00557   // Make sure it's legal to commute operands for VOP2.
00558   if (isVOP2(MI->getOpcode()) &&
00559       (!isOperandLegal(MI, 1, &MI->getOperand(2)) ||
00560        !isOperandLegal(MI, 2, &MI->getOperand(1))))
00561     return nullptr;
00562 
00563   if (!MI->getOperand(2).isReg()) {
00564     // XXX: Commute instructions with FPImm operands
00565     if (NewMI || MI->getOperand(2).isFPImm() ||
00566        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
00567       return nullptr;
00568     }
00569 
00570     // XXX: Commute VOP3 instructions with abs and neg set.
00571     const MachineOperand *Abs = getNamedOperand(*MI, AMDGPU::OpName::abs);
00572     const MachineOperand *Neg = getNamedOperand(*MI, AMDGPU::OpName::neg);
00573     const MachineOperand *Src0Mods = getNamedOperand(*MI,
00574                                           AMDGPU::OpName::src0_modifiers);
00575     const MachineOperand *Src1Mods = getNamedOperand(*MI,
00576                                           AMDGPU::OpName::src1_modifiers);
00577     const MachineOperand *Src2Mods = getNamedOperand(*MI,
00578                                           AMDGPU::OpName::src2_modifiers);
00579 
00580     if ((Abs && Abs->getImm()) || (Neg && Neg->getImm()) ||
00581         (Src0Mods && Src0Mods->getImm()) || (Src1Mods && Src1Mods->getImm()) ||
00582         (Src2Mods && Src2Mods->getImm()))
00583       return nullptr;
00584 
00585     unsigned Reg = MI->getOperand(1).getReg();
00586     unsigned SubReg = MI->getOperand(1).getSubReg();
00587     MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
00588     MI->getOperand(2).ChangeToRegister(Reg, false);
00589     MI->getOperand(2).setSubReg(SubReg);
00590   } else {
00591     MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
00592   }
00593 
00594   if (MI)
00595     MI->setDesc(get(commuteOpcode(MI->getOpcode())));
00596 
00597   return MI;
00598 }
00599 
00600 MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
00601                                          MachineBasicBlock::iterator I,
00602                                          unsigned DstReg,
00603                                          unsigned SrcReg) const {
00604   return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
00605                  DstReg).addReg(SrcReg);
00606 }
00607 
00608 bool SIInstrInfo::isMov(unsigned Opcode) const {
00609   switch(Opcode) {
00610   default: return false;
00611   case AMDGPU::S_MOV_B32:
00612   case AMDGPU::S_MOV_B64:
00613   case AMDGPU::V_MOV_B32_e32:
00614   case AMDGPU::V_MOV_B32_e64:
00615     return true;
00616   }
00617 }
00618 
00619 bool
00620 SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
00621   return RC != &AMDGPU::EXECRegRegClass;
00622 }
00623 
00624 bool
00625 SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
00626                                          AliasAnalysis *AA) const {
00627   switch(MI->getOpcode()) {
00628   default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
00629   case AMDGPU::S_MOV_B32:
00630   case AMDGPU::S_MOV_B64:
00631   case AMDGPU::V_MOV_B32_e32:
00632     return MI->getOperand(1).isImm();
00633   }
00634 }
00635 
00636 namespace llvm {
00637 namespace AMDGPU {
00638 // Helper function generated by tablegen.  We are wrapping this with
00639 // an SIInstrInfo function that returns bool rather than int.
00640 int isDS(uint16_t Opcode);
00641 }
00642 }
00643 
00644 bool SIInstrInfo::isDS(uint16_t Opcode) const {
00645   return ::AMDGPU::isDS(Opcode) != -1;
00646 }
00647 
00648 bool SIInstrInfo::isMIMG(uint16_t Opcode) const {
00649   return get(Opcode).TSFlags & SIInstrFlags::MIMG;
00650 }
00651 
00652 bool SIInstrInfo::isSMRD(uint16_t Opcode) const {
00653   return get(Opcode).TSFlags & SIInstrFlags::SMRD;
00654 }
00655 
00656 bool SIInstrInfo::isMUBUF(uint16_t Opcode) const {
00657   return get(Opcode).TSFlags & SIInstrFlags::MUBUF;
00658 }
00659 
00660 bool SIInstrInfo::isMTBUF(uint16_t Opcode) const {
00661   return get(Opcode).TSFlags & SIInstrFlags::MTBUF;
00662 }
00663 
00664 bool SIInstrInfo::isFLAT(uint16_t Opcode) const {
00665   return get(Opcode).TSFlags & SIInstrFlags::FLAT;
00666 }
00667 
00668 bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
00669   return get(Opcode).TSFlags & SIInstrFlags::VOP1;
00670 }
00671 
00672 bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
00673   return get(Opcode).TSFlags & SIInstrFlags::VOP2;
00674 }
00675 
00676 bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
00677   return get(Opcode).TSFlags & SIInstrFlags::VOP3;
00678 }
00679 
00680 bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
00681   return get(Opcode).TSFlags & SIInstrFlags::VOPC;
00682 }
00683 
00684 bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
00685   return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
00686 }
00687 
00688 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
00689   int32_t Val = Imm.getSExtValue();
00690   if (Val >= -16 && Val <= 64)
00691     return true;
00692 
00693   // The actual type of the operand does not seem to matter as long
00694   // as the bits match one of the inline immediate values.  For example:
00695   //
00696   // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
00697   // so it is a legal inline immediate.
00698   //
00699   // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
00700   // floating-point, so it is a legal inline immediate.
00701 
00702   return (APInt::floatToBits(0.0f) == Imm) ||
00703          (APInt::floatToBits(1.0f) == Imm) ||
00704          (APInt::floatToBits(-1.0f) == Imm) ||
00705          (APInt::floatToBits(0.5f) == Imm) ||
00706          (APInt::floatToBits(-0.5f) == Imm) ||
00707          (APInt::floatToBits(2.0f) == Imm) ||
00708          (APInt::floatToBits(-2.0f) == Imm) ||
00709          (APInt::floatToBits(4.0f) == Imm) ||
00710          (APInt::floatToBits(-4.0f) == Imm);
00711 }
00712 
00713 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
00714   if (MO.isImm())
00715     return isInlineConstant(APInt(32, MO.getImm(), true));
00716 
00717   if (MO.isFPImm()) {
00718     APFloat FpImm = MO.getFPImm()->getValueAPF();
00719     return isInlineConstant(FpImm.bitcastToAPInt());
00720   }
00721 
00722   return false;
00723 }
00724 
00725 bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
00726   return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
00727 }
00728 
00729 static bool compareMachineOp(const MachineOperand &Op0,
00730                              const MachineOperand &Op1) {
00731   if (Op0.getType() != Op1.getType())
00732     return false;
00733 
00734   switch (Op0.getType()) {
00735   case MachineOperand::MO_Register:
00736     return Op0.getReg() == Op1.getReg();
00737   case MachineOperand::MO_Immediate:
00738     return Op0.getImm() == Op1.getImm();
00739   case MachineOperand::MO_FPImmediate:
00740     return Op0.getFPImm() == Op1.getFPImm();
00741   default:
00742     llvm_unreachable("Didn't expect to be comparing these operand types");
00743   }
00744 }
00745 
00746 bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
00747                                  const MachineOperand &MO) const {
00748   const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];
00749 
00750   assert(MO.isImm() || MO.isFPImm());
00751 
00752   if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
00753     return true;
00754 
00755   if (OpInfo.RegClass < 0)
00756     return false;
00757 
00758   return RI.regClassCanUseImmediate(OpInfo.RegClass);
00759 }
00760 
00761 bool SIInstrInfo::canFoldOffset(unsigned OffsetSize, unsigned AS) {
00762   switch (AS) {
00763   case AMDGPUAS::GLOBAL_ADDRESS: {
00764     // MUBUF instructions have a 12-bit offset in bytes.
00765     return isUInt<12>(OffsetSize);
00766   }
00767   case AMDGPUAS::CONSTANT_ADDRESS: {
00768     // SMRD instructions have an 8-bit offset in dwords.
00769     return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
00770   }
00771   case AMDGPUAS::LOCAL_ADDRESS:
00772   case AMDGPUAS::REGION_ADDRESS: {
00773     // The single offset versions have a 16-bit offset in bytes.
00774     return isUInt<16>(OffsetSize);
00775   }
00776   case AMDGPUAS::PRIVATE_ADDRESS:
00777     // Indirect register addressing does not use any offsets.
00778   default:
00779     return false;
00780   }
00781 }
00782 
00783 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
00784   return AMDGPU::getVOPe32(Opcode) != -1;
00785 }
00786 
00787 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
00788   // The src0_modifiers operand is present on all instructions
00789   // that have modifiers.
00790 
00791   return AMDGPU::getNamedOperandIdx(Opcode,
00792                                     AMDGPU::OpName::src0_modifiers) != -1;
00793 }
00794 
00795 bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
00796                                     StringRef &ErrInfo) const {
00797   uint16_t Opcode = MI->getOpcode();
00798   int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
00799   int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
00800   int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
00801 
00802   // Make sure the number of operands is correct.
00803   const MCInstrDesc &Desc = get(Opcode);
00804   if (!Desc.isVariadic() &&
00805       Desc.getNumOperands() != MI->getNumExplicitOperands()) {
00806      ErrInfo = "Instruction has wrong number of operands.";
00807      return false;
00808   }
00809 
00810   // Make sure the register classes are correct
00811   for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
00812     switch (Desc.OpInfo[i].OperandType) {
00813     case MCOI::OPERAND_REGISTER: {
00814       int RegClass = Desc.OpInfo[i].RegClass;
00815       if (!RI.regClassCanUseImmediate(RegClass) &&
00816           (MI->getOperand(i).isImm() || MI->getOperand(i).isFPImm())) {
00817         // Handle some special cases:
00818         // src0 of VOP1, VOP2, and VOPC instructions can be an immediate no
00819         // matter what the register class is.
00820         if (i != Src0Idx || (!isVOP1(Opcode) && !isVOP2(Opcode) &&
00821                                   !isVOPC(Opcode))) {
00822           ErrInfo = "Expected register, but got immediate";
00823           return false;
00824         }
00825       }
00826     }
00827       break;
00828     case MCOI::OPERAND_IMMEDIATE:
00829       // Check if this operand is an immediate.
00830       // FrameIndex operands will be replaced by immediates, so they are
00831       // allowed.
00832       if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm() &&
00833           !MI->getOperand(i).isFI()) {
00834         ErrInfo = "Expected immediate, but got non-immediate";
00835         return false;
00836       }
00837       // Fall-through
00838     default:
00839       continue;
00840     }
00841 
00842     if (!MI->getOperand(i).isReg())
00843       continue;
00844 
00845     int RegClass = Desc.OpInfo[i].RegClass;
00846     if (RegClass != -1) {
00847       unsigned Reg = MI->getOperand(i).getReg();
00848       if (TargetRegisterInfo::isVirtualRegister(Reg))
00849         continue;
00850 
00851       const TargetRegisterClass *RC = RI.getRegClass(RegClass);
00852       if (!RC->contains(Reg)) {
00853         ErrInfo = "Operand has incorrect register class.";
00854         return false;
00855       }
00856     }
00857   }
00858 
00859 
00860   // Verify VOP*
00861   if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
00862     unsigned ConstantBusCount = 0;
00863     unsigned SGPRUsed = AMDGPU::NoRegister;
00864     for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
00865       const MachineOperand &MO = MI->getOperand(i);
00866       if (MO.isReg() && MO.isUse() &&
00867           !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
00868 
00869         // EXEC register uses the constant bus.
00870         if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
00871           ++ConstantBusCount;
00872 
00873         // FLAT_SCR is just an SGPR pair.
00874         if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
00875           ++ConstantBusCount;
00876 
00877         // SGPRs use the constant bus
00878         if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
00879             (!MO.isImplicit() &&
00880             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
00881             AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
00882           if (SGPRUsed != MO.getReg()) {
00883             ++ConstantBusCount;
00884             SGPRUsed = MO.getReg();
00885           }
00886         }
00887       }
00888       // Literal constants use the constant bus.
00889       if (isLiteralConstant(MO))
00890         ++ConstantBusCount;
00891     }
00892     if (ConstantBusCount > 1) {
00893       ErrInfo = "VOP* instruction uses the constant bus more than once";
00894       return false;
00895     }
00896   }
00897 
00898   // Verify SRC1 for VOP2 and VOPC
00899   if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
00900     const MachineOperand &Src1 = MI->getOperand(Src1Idx);
00901     if (Src1.isImm() || Src1.isFPImm()) {
00902       ErrInfo = "VOP[2C] src1 cannot be an immediate.";
00903       return false;
00904     }
00905   }
00906 
00907   // Verify VOP3
00908   if (isVOP3(Opcode)) {
00909     if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
00910       ErrInfo = "VOP3 src0 cannot be a literal constant.";
00911       return false;
00912     }
00913     if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
00914       ErrInfo = "VOP3 src1 cannot be a literal constant.";
00915       return false;
00916     }
00917     if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
00918       ErrInfo = "VOP3 src2 cannot be a literal constant.";
00919       return false;
00920     }
00921   }
00922 
00923   // Verify misc. restrictions on specific instructions.
00924   if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
00925       Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
00926     MI->dump();
00927 
00928     const MachineOperand &Src0 = MI->getOperand(2);
00929     const MachineOperand &Src1 = MI->getOperand(3);
00930     const MachineOperand &Src2 = MI->getOperand(4);
00931     if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
00932       if (!compareMachineOp(Src0, Src1) &&
00933           !compareMachineOp(Src0, Src2)) {
00934         ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
00935         return false;
00936       }
00937     }
00938   }
00939 
00940   return true;
00941 }
00942 
00943 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
00944   switch (MI.getOpcode()) {
00945   default: return AMDGPU::INSTRUCTION_LIST_END;
00946   case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
00947   case AMDGPU::COPY: return AMDGPU::COPY;
00948   case AMDGPU::PHI: return AMDGPU::PHI;
00949   case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
00950   case AMDGPU::S_MOV_B32:
00951     return MI.getOperand(1).isReg() ?
00952            AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
00953   case AMDGPU::S_ADD_I32:
00954   case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
00955   case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
00956   case AMDGPU::S_SUB_I32:
00957   case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
00958   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
00959   case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
00960   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
00961   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
00962   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
00963   case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
00964   case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
00965   case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
00966   case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
00967   case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
00968   case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
00969   case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
00970   case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
00971   case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
00972   case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
00973   case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
00974   case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
00975   case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
00976   case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
00977   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
00978   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
00979   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
00980   case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
00981   case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
00982   case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
00983   case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
00984   case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
00985   case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
00986   case AMDGPU::S_LOAD_DWORD_IMM:
00987   case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
00988   case AMDGPU::S_LOAD_DWORDX2_IMM:
00989   case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
00990   case AMDGPU::S_LOAD_DWORDX4_IMM:
00991   case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
00992   case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e32;
00993   case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
00994   case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
00995   }
00996 }
00997 
00998 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
00999   return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
01000 }
01001 
01002 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
01003                                                       unsigned OpNo) const {
01004   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
01005   const MCInstrDesc &Desc = get(MI.getOpcode());
01006   if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
01007       Desc.OpInfo[OpNo].RegClass == -1)
01008     return MRI.getRegClass(MI.getOperand(OpNo).getReg());
01009 
01010   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
01011   return RI.getRegClass(RCID);
01012 }
01013 
01014 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
01015   switch (MI.getOpcode()) {
01016   case AMDGPU::COPY:
01017   case AMDGPU::REG_SEQUENCE:
01018   case AMDGPU::PHI:
01019   case AMDGPU::INSERT_SUBREG:
01020     return RI.hasVGPRs(getOpRegClass(MI, 0));
01021   default:
01022     return RI.hasVGPRs(getOpRegClass(MI, OpNo));
01023   }
01024 }
01025 
01026 void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
01027   MachineBasicBlock::iterator I = MI;
01028   MachineOperand &MO = MI->getOperand(OpIdx);
01029   MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
01030   unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
01031   const TargetRegisterClass *RC = RI.getRegClass(RCID);
01032   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
01033   if (MO.isReg()) {
01034     Opcode = AMDGPU::COPY;
01035   } else if (RI.isSGPRClass(RC)) {
01036     Opcode = AMDGPU::S_MOV_B32;
01037   }
01038 
01039   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
01040   if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) {
01041     VRC = &AMDGPU::VReg_64RegClass;
01042   } else {
01043     VRC = &AMDGPU::VReg_32RegClass;
01044   }
01045   unsigned Reg = MRI.createVirtualRegister(VRC);
01046   BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
01047           Reg).addOperand(MO);
01048   MO.ChangeToRegister(Reg, false);
01049 }
01050 
01051 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
01052                                          MachineRegisterInfo &MRI,
01053                                          MachineOperand &SuperReg,
01054                                          const TargetRegisterClass *SuperRC,
01055                                          unsigned SubIdx,
01056                                          const TargetRegisterClass *SubRC)
01057                                          const {
01058   assert(SuperReg.isReg());
01059 
01060   unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
01061   unsigned SubReg = MRI.createVirtualRegister(SubRC);
01062 
01063   // Just in case the super register is itself a sub-register, copy it to a new
01064   // value so we don't need to worry about merging its subreg index with the
01065   // SubIdx passed to this function. The register coalescer should be able to
01066   // eliminate this extra copy.
01067   BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
01068           NewSuperReg)
01069           .addOperand(SuperReg);
01070 
01071   BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
01072           SubReg)
01073           .addReg(NewSuperReg, 0, SubIdx);
01074   return SubReg;
01075 }
01076 
01077 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
01078   MachineBasicBlock::iterator MII,
01079   MachineRegisterInfo &MRI,
01080   MachineOperand &Op,
01081   const TargetRegisterClass *SuperRC,
01082   unsigned SubIdx,
01083   const TargetRegisterClass *SubRC) const {
01084   if (Op.isImm()) {
01085     // XXX - Is there a better way to do this?
01086     if (SubIdx == AMDGPU::sub0)
01087       return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
01088     if (SubIdx == AMDGPU::sub1)
01089       return MachineOperand::CreateImm(Op.getImm() >> 32);
01090 
01091     llvm_unreachable("Unhandled register index for immediate");
01092   }
01093 
01094   unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
01095                                        SubIdx, SubRC);
01096   return MachineOperand::CreateReg(SubReg, false);
01097 }
01098 
01099 unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
01100                                     MachineBasicBlock::iterator MI,
01101                                     MachineRegisterInfo &MRI,
01102                                     const TargetRegisterClass *RC,
01103                                     const MachineOperand &Op) const {
01104   MachineBasicBlock *MBB = MI->getParent();
01105   DebugLoc DL = MI->getDebugLoc();
01106   unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01107   unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01108   unsigned Dst = MRI.createVirtualRegister(RC);
01109 
01110   MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
01111                              LoDst)
01112     .addImm(Op.getImm() & 0xFFFFFFFF);
01113   MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
01114                              HiDst)
01115     .addImm(Op.getImm() >> 32);
01116 
01117   BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
01118     .addReg(LoDst)
01119     .addImm(AMDGPU::sub0)
01120     .addReg(HiDst)
01121     .addImm(AMDGPU::sub1);
01122 
01123   Worklist.push_back(Lo);
01124   Worklist.push_back(Hi);
01125 
01126   return Dst;
01127 }
01128 
01129 bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
01130                                  const MachineOperand *MO) const {
01131   const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
01132   const MCInstrDesc &InstDesc = get(MI->getOpcode());
01133   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
01134   const TargetRegisterClass *DefinedRC =
01135       OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
01136   if (!MO)
01137     MO = &MI->getOperand(OpIdx);
01138 
01139   if (MO->isReg()) {
01140     assert(DefinedRC);
01141     const TargetRegisterClass *RC = MRI.getRegClass(MO->getReg());
01142     return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass));
01143   }
01144 
01145 
01146   // Handle non-register types that are treated like immediates.
01147   assert(MO->isImm() || MO->isFPImm() || MO->isTargetIndex() || MO->isFI());
01148 
01149   if (!DefinedRC)
01150     // This operand expects an immediate.
01151     return true;
01152 
01153   return RI.regClassCanUseImmediate(DefinedRC);
01154 }
01155 
01156 void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
01157   MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
01158 
01159   int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
01160                                            AMDGPU::OpName::src0);
01161   int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
01162                                            AMDGPU::OpName::src1);
01163   int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
01164                                            AMDGPU::OpName::src2);
01165 
01166   // Legalize VOP2
01167   if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
01168     // Legalize src0
01169     if (!isOperandLegal(MI, Src0Idx))
01170       legalizeOpWithMove(MI, Src0Idx);
01171 
01172     // Legalize src1
01173     if (isOperandLegal(MI, Src1Idx))
01174       return;
01175 
01176     // Usually src0 of VOP2 instructions allows more types of inputs
01177     // than src1, so try to commute the instruction to decrease our
01178     // chances of having to insert a MOV instruction to legalize src1.
01179     if (MI->isCommutable()) {
01180       if (commuteInstruction(MI))
01181         // If we are successful in commuting, then we know MI is legal, so
01182         // we are done.
01183         return;
01184     }
01185 
01186     legalizeOpWithMove(MI, Src1Idx);
01187     return;
01188   }
01189 
01190   // XXX - Do any VOP3 instructions read VCC?
01191   // Legalize VOP3
01192   if (isVOP3(MI->getOpcode())) {
01193     int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
01194     unsigned SGPRReg = AMDGPU::NoRegister;
01195     for (unsigned i = 0; i < 3; ++i) {
01196       int Idx = VOP3Idx[i];
01197       if (Idx == -1)
01198         continue;
01199       MachineOperand &MO = MI->getOperand(Idx);
01200 
01201       if (MO.isReg()) {
01202         if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
01203           continue; // VGPRs are legal
01204 
01205         assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");
01206 
01207         if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
01208           SGPRReg = MO.getReg();
01209           // We can use one SGPR in each VOP3 instruction.
01210           continue;
01211         }
01212       } else if (!isLiteralConstant(MO)) {
01213         // If it is not a register and not a literal constant, then it must be
01214         // an inline constant which is always legal.
01215         continue;
01216       }
01217       // If we make it this far, then the operand is not legal and we must
01218       // legalize it.
01219       legalizeOpWithMove(MI, Idx);
01220     }
01221   }
01222 
01223   // Legalize REG_SEQUENCE and PHI
01224   // The register class of the operands must match the register
01225   // class of the output.
01226   if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
01227       MI->getOpcode() == AMDGPU::PHI) {
01228     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
01229     for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
01230       if (!MI->getOperand(i).isReg() ||
01231           !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
01232         continue;
01233       const TargetRegisterClass *OpRC =
01234               MRI.getRegClass(MI->getOperand(i).getReg());
01235       if (RI.hasVGPRs(OpRC)) {
01236         VRC = OpRC;
01237       } else {
01238         SRC = OpRC;
01239       }
01240     }
01241 
01242     // If any of the operands are VGPR registers, then they all must be VGPRs;
01243     // otherwise we will create illegal VGPR->SGPR copies when legalizing
01244     // them.
01245     if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
01246       if (!VRC) {
01247         assert(SRC);
01248         VRC = RI.getEquivalentVGPRClass(SRC);
01249       }
01250       RC = VRC;
01251     } else {
01252       RC = SRC;
01253     }
01254 
01255     // Update all the operands so they have the same type.
01256     for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
01257       if (!MI->getOperand(i).isReg() ||
01258           !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
01259         continue;
01260       unsigned DstReg = MRI.createVirtualRegister(RC);
01261       MachineBasicBlock *InsertBB;
01262       MachineBasicBlock::iterator Insert;
01263       if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
01264         InsertBB = MI->getParent();
01265         Insert = MI;
01266       } else {
01267         // MI is a PHI instruction.
01268         InsertBB = MI->getOperand(i + 1).getMBB();
01269         Insert = InsertBB->getFirstTerminator();
01270       }
01271       BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
01272               get(AMDGPU::COPY), DstReg)
01273               .addOperand(MI->getOperand(i));
01274       MI->getOperand(i).setReg(DstReg);
01275     }
01276   }
01277 
01278   // Legalize INSERT_SUBREG
01279   // src0 must have the same register class as dst
01280   if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
01281     unsigned Dst = MI->getOperand(0).getReg();
01282     unsigned Src0 = MI->getOperand(1).getReg();
01283     const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
01284     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
01285     if (DstRC != Src0RC) {
01286       MachineBasicBlock &MBB = *MI->getParent();
01287       unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
01288       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
01289               .addReg(Src0);
01290       MI->getOperand(1).setReg(NewSrc0);
01291     }
01292     return;
01293   }
01294 
01295   // Legalize MUBUF* instructions
01296   // FIXME: If we start using the non-addr64 instructions for compute, we
01297   // may need to legalize them here.
01298   int SRsrcIdx =
01299       AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
01300   if (SRsrcIdx != -1) {
01301     // We have a MUBUF instruction.
01302     MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
01303     unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
01304     if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
01305                                              RI.getRegClass(SRsrcRC))) {
01306       // The operands are legal.
01307       // FIXME: We may need to legalize operands besides srsrc.
01308       return;
01309     }
01310 
01311     MachineBasicBlock &MBB = *MI->getParent();
01312     // Extract the ptr from the resource descriptor.
01313 
01314     // SRsrcPtrLo = srsrc:sub0
01315     unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,
01316         &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);
01317 
01318     // SRsrcPtrHi = srsrc:sub1
01319     unsigned SRsrcPtrHi = buildExtractSubReg(MI, MRI, *SRsrc,
01320         &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);
01321 
01322     // Create an empty resource descriptor
01323     unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
01324     unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01325     unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01326     unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
01327 
01328     // Zero64 = 0
01329     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
01330             Zero64)
01331             .addImm(0);
01332 
01333     // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
01334     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
01335             SRsrcFormatLo)
01336             .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
01337 
01338     // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
01339     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
01340             SRsrcFormatHi)
01341             .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
01342 
01343     // NewSRsrc = {Zero64, SRsrcFormat}
01344     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
01345             NewSRsrc)
01346             .addReg(Zero64)
01347             .addImm(AMDGPU::sub0_sub1)
01348             .addReg(SRsrcFormatLo)
01349             .addImm(AMDGPU::sub2)
01350             .addReg(SRsrcFormatHi)
01351             .addImm(AMDGPU::sub3);
01352 
01353     MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
01354     unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
01355     unsigned NewVAddrLo;
01356     unsigned NewVAddrHi;
01357     if (VAddr) {
01358       // This is already an ADDR64 instruction so we need to add the pointer
01359       // extracted from the resource descriptor to the current value of VAddr.
01360       NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
01361       NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
01362 
01363       // NewVaddrLo = SRsrcPtrLo + VAddr:sub0
01364       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
01365               NewVAddrLo)
01366               .addReg(SRsrcPtrLo)
01367               .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
01368               .addReg(AMDGPU::VCC, RegState::ImplicitDefine);
01369 
01370       // NewVaddrHi = SRsrcPtrHi + VAddr:sub1
01371       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
01372               NewVAddrHi)
01373               .addReg(SRsrcPtrHi)
01374               .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
01375               .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
01376               .addReg(AMDGPU::VCC, RegState::Implicit);
01377 
01378     } else {
01379       // This instruction is the _OFFSET variant, so we need to convert it to
01380       // ADDR64.
01381       MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
01382       MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
01383       MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
01384       assert(SOffset->isImm() && SOffset->getImm() == 0 && "Legalizing MUBUF "
01385              "with non-zero soffset is not implemented");
01386       (void)SOffset;
01387 
01388       // Create the new instruction.
01389       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
01390       MachineInstr *Addr64 =
01391           BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
01392                   .addOperand(*VData)
01393                   .addOperand(*SRsrc)
01394                   .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
01395                                               // This will be replaced later
01396                                               // with the new value of vaddr.
01397                   .addOperand(*Offset);
01398 
01399       MI->removeFromParent();
01400       MI = Addr64;
01401 
01402       NewVAddrLo = SRsrcPtrLo;
01403       NewVAddrHi = SRsrcPtrHi;
01404       VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
01405       SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
01406     }
01407 
01408     // NewVAddr = {NewVAddrHi, NewVAddrLo}
01409     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
01410             NewVAddr)
01411             .addReg(NewVAddrLo)
01412             .addImm(AMDGPU::sub0)
01413             .addReg(NewVAddrHi)
01414             .addImm(AMDGPU::sub1);
01415 
01416 
01417     // Update the instruction to use NewVAddr
01418     VAddr->setReg(NewVAddr);
01419     // Update the instruction to use NewSRsrc
01420     SRsrc->setReg(NewSRsrc);
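    // Net effect: the base pointer that lived in the illegal (VGPR) resource
    // descriptor now flows through NewVAddr of the ADDR64 form, while srsrc
    // is replaced by NewSRsrc, whose registers are all SGPRs.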
01421   }
01422 }
01423 
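/// \brief Split a wide SMRD load \p MI into two loads of half the width.
///
/// The halves use \p HalfRC as their result class and are emitted with the
/// \p HalfImmOp (immediate offset) or \p HalfSGPROp (register offset) form,
/// then recombined into the original destination with a REG_SEQUENCE.  For
/// example, an S_LOAD_DWORDX8_IMM at immediate offset N becomes two
/// S_LOAD_DWORDX4_IMM loads at dword offsets N and N + 4, falling back to the
/// SGPR-offset form when N + 4 does not fit in the 8-bit immediate field.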
01424 void SIInstrInfo::splitSMRD(MachineInstr *MI,
01425                             const TargetRegisterClass *HalfRC,
01426                             unsigned HalfImmOp, unsigned HalfSGPROp,
01427                             MachineInstr *&Lo, MachineInstr *&Hi) const {
01428 
01429   DebugLoc DL = MI->getDebugLoc();
01430   MachineBasicBlock *MBB = MI->getParent();
01431   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
01432   unsigned RegLo = MRI.createVirtualRegister(HalfRC);
01433   unsigned RegHi = MRI.createVirtualRegister(HalfRC);
01434   unsigned HalfSize = HalfRC->getSize();
01435   const MachineOperand *OffOp =
01436       getNamedOperand(*MI, AMDGPU::OpName::offset);
01437   const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);
01438 
01439   if (OffOp) {
01440     // Handle the _IMM variant
01441     unsigned LoOffset = OffOp->getImm();
01442     unsigned HiOffset = LoOffset + (HalfSize / 4);
01443     Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
01444                   .addOperand(*SBase)
01445                   .addImm(LoOffset);
01446 
01447     if (!isUInt<8>(HiOffset)) {
01448       unsigned OffsetSGPR =
01449           MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
01450       BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
01451               .addImm(HiOffset << 2);  // The immediate offset is in dwords,
01452                                        // but the offset in a register is in bytes.
01453       Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
01454                     .addOperand(*SBase)
01455                     .addReg(OffsetSGPR);
01456     } else {
01457       Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
01458                      .addOperand(*SBase)
01459                      .addImm(HiOffset);
01460     }
01461   } else {
01462     // Handle the _SGPR variant
01463     MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
01464     Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
01465                   .addOperand(*SBase)
01466                   .addOperand(*SOff);
01467     unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
01468     BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
01469             .addOperand(*SOff)
01470             .addImm(HalfSize);
01471     Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
01472                   .addOperand(*SBase)
01473                   .addReg(OffsetSGPR);
01474   }
01475 
01476   unsigned SubLo, SubHi;
01477   switch (HalfSize) {
01478     case 4:
01479       SubLo = AMDGPU::sub0;
01480       SubHi = AMDGPU::sub1;
01481       break;
01482     case 8:
01483       SubLo = AMDGPU::sub0_sub1;
01484       SubHi = AMDGPU::sub2_sub3;
01485       break;
01486     case 16:
01487       SubLo = AMDGPU::sub0_sub1_sub2_sub3;
01488       SubHi = AMDGPU::sub4_sub5_sub6_sub7;
01489       break;
01490     case 32:
01491       SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
01492       SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
01493       break;
01494     default:
01495       llvm_unreachable("Unhandled HalfSize");
01496   }
01497 
01498   BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE))
01499           .addOperand(MI->getOperand(0))
01500           .addReg(RegLo)
01501           .addImm(SubLo)
01502           .addReg(RegHi)
01503           .addImm(SubHi);
01504 }
01505 
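/// \brief Rewrite a scalar memory (SMRD) load so its result can live in VGPRs.
///
/// DWORD, DWORDX2 and DWORDX4 loads are converted in place to the VALU opcode
/// reported by getVALUOp(): a 128-bit resource descriptor is built in SGPRs
/// (the offset or zero in dword 0, zero in dword 1, RSRC_DATA_FORMAT in
/// dwords 2-3), the old base pointer takes the place of the old offset
/// operand, and the destination is rewritten to a register class suitable for
/// the new opcode.  DWORDX8 and DWORDX16 loads are first split in half with
/// splitSMRD() and each half is converted recursively.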
01506 void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const {
01507   MachineBasicBlock *MBB = MI->getParent();
01508   switch (MI->getOpcode()) {
01509     case AMDGPU::S_LOAD_DWORD_IMM:
01510     case AMDGPU::S_LOAD_DWORD_SGPR:
01511     case AMDGPU::S_LOAD_DWORDX2_IMM:
01512     case AMDGPU::S_LOAD_DWORDX2_SGPR:
01513     case AMDGPU::S_LOAD_DWORDX4_IMM:
01514     case AMDGPU::S_LOAD_DWORDX4_SGPR: {
01515       unsigned NewOpcode = getVALUOp(*MI);
01516       unsigned RegOffset;
01517       unsigned ImmOffset;
01518 
01519       if (MI->getOperand(2).isReg()) {
01520         RegOffset = MI->getOperand(2).getReg();
01521         ImmOffset = 0;
01522       } else {
01523         assert(MI->getOperand(2).isImm());
01524         // SMRD instructions take a dword offset and MUBUF instructions
01525         // take a byte offset.
01526         ImmOffset = MI->getOperand(2).getImm() << 2;
01527         RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01528         if (isUInt<12>(ImmOffset)) {
01529           BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
01530                   RegOffset)
01531                   .addImm(0);
01532         } else {
01533           BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
01534                   RegOffset)
01535                   .addImm(ImmOffset);
01536           ImmOffset = 0;
01537         }
01538       }
01539 
01540       unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
01541       unsigned DWord0 = RegOffset;
01542       unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01543       unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01544       unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
01545 
01546       BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
01547               .addImm(0);
01548       BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
01549               .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
01550       BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
01551               .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
01552       BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
01553               .addReg(DWord0)
01554               .addImm(AMDGPU::sub0)
01555               .addReg(DWord1)
01556               .addImm(AMDGPU::sub1)
01557               .addReg(DWord2)
01558               .addImm(AMDGPU::sub2)
01559               .addReg(DWord3)
01560               .addImm(AMDGPU::sub3);
01561       MI->setDesc(get(NewOpcode));
01562       if (MI->getOperand(2).isReg()) {
01563         MI->getOperand(2).setReg(MI->getOperand(1).getReg());
01564       } else {
01565         MI->getOperand(2).ChangeToRegister(MI->getOperand(1).getReg(), false);
01566       }
01567       MI->getOperand(1).setReg(SRsrc);
01568       MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
01569 
01570       const TargetRegisterClass *NewDstRC =
01571           RI.getRegClass(get(NewOpcode).OpInfo[0].RegClass);
01572 
01573       unsigned DstReg = MI->getOperand(0).getReg();
01574       unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
01575       MRI.replaceRegWith(DstReg, NewDstReg);
01576       break;
01577     }
01578     case AMDGPU::S_LOAD_DWORDX8_IMM:
01579     case AMDGPU::S_LOAD_DWORDX8_SGPR: {
01580       MachineInstr *Lo, *Hi;
01581       splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM,
01582                 AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi);
01583       MI->eraseFromParent();
01584       moveSMRDToVALU(Lo, MRI);
01585       moveSMRDToVALU(Hi, MRI);
01586       break;
01587     }
01588 
01589     case AMDGPU::S_LOAD_DWORDX16_IMM:
01590     case AMDGPU::S_LOAD_DWORDX16_SGPR: {
01591       MachineInstr *Lo, *Hi;
01592       splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM,
01593                 AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi);
01594       MI->eraseFromParent();
01595       moveSMRDToVALU(Lo, MRI);
01596       moveSMRDToVALU(Hi, MRI);
01597       break;
01598     }
01599   }
01600 }
01601 
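/// \brief Replace \p TopInst, and transitively any users that require it,
/// with VALU equivalents.
///
/// A worklist seeded with \p TopInst is processed until empty.  Each
/// instruction is either rewritten in place to the opcode reported by
/// getVALUOp(), handled as a special case (64-bit scalar ops are split into
/// two 32-bit halves, SMRD loads go through moveSMRDToVALU()), or merely has
/// its operands legalized when no VALU equivalent exists.  After a rewrite,
/// every user of the new VGPR result that cannot read a VGPR is pushed onto
/// the worklist so it gets moved as well.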
01602 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
01603   SmallVector<MachineInstr *, 128> Worklist;
01604   Worklist.push_back(&TopInst);
01605 
01606   while (!Worklist.empty()) {
01607     MachineInstr *Inst = Worklist.pop_back_val();
01608     MachineBasicBlock *MBB = Inst->getParent();
01609     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
01610 
01611     unsigned Opcode = Inst->getOpcode();
01612     unsigned NewOpcode = getVALUOp(*Inst);
01613 
01614     // Handle some special cases
01615     switch (Opcode) {
01616     default:
01617       if (isSMRD(Inst->getOpcode())) {
01618         moveSMRDToVALU(Inst, MRI);
01619       }
01620       break;
01621     case AMDGPU::S_MOV_B64: {
01622       DebugLoc DL = Inst->getDebugLoc();
01623 
01624       // If the source operand is a register we can replace this with a
01625       // copy.
01626       if (Inst->getOperand(1).isReg()) {
01627         MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
01628           .addOperand(Inst->getOperand(0))
01629           .addOperand(Inst->getOperand(1));
01630         Worklist.push_back(Copy);
01631       } else {
01632         // Otherwise, we need to split this into two movs, because there is
01633         // no 64-bit VALU move instruction.
01634         unsigned Reg = Inst->getOperand(0).getReg();
01635         unsigned Dst = split64BitImm(Worklist,
01636                                      Inst,
01637                                      MRI,
01638                                      MRI.getRegClass(Reg),
01639                                      Inst->getOperand(1));
01640         MRI.replaceRegWith(Reg, Dst);
01641       }
01642       Inst->eraseFromParent();
01643       continue;
01644     }
01645     case AMDGPU::S_AND_B64:
01646       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32);
01647       Inst->eraseFromParent();
01648       continue;
01649 
01650     case AMDGPU::S_OR_B64:
01651       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32);
01652       Inst->eraseFromParent();
01653       continue;
01654 
01655     case AMDGPU::S_XOR_B64:
01656       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32);
01657       Inst->eraseFromParent();
01658       continue;
01659 
01660     case AMDGPU::S_NOT_B64:
01661       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
01662       Inst->eraseFromParent();
01663       continue;
01664 
01665     case AMDGPU::S_BCNT1_I32_B64:
01666       splitScalar64BitBCNT(Worklist, Inst);
01667       Inst->eraseFromParent();
01668       continue;
01669 
01670     case AMDGPU::S_BFE_U64:
01671     case AMDGPU::S_BFE_I64:
01672     case AMDGPU::S_BFM_B64:
01673       llvm_unreachable("Moving this op to VALU not implemented");
01674     }
01675 
01676     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
01677       // We cannot move this instruction to the VALU, so we should try to
01678       // legalize its operands instead.
01679       legalizeOperands(Inst);
01680       continue;
01681     }
01682 
01683     // Use the new VALU Opcode.
01684     const MCInstrDesc &NewDesc = get(NewOpcode);
01685     Inst->setDesc(NewDesc);
01686 
01687     // Remove any references to SCC. Vector instructions can't read from it,
01688     // and we're just about to add the implicit use/defs of VCC; we don't want
01689     // both.
01690     for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
01691       MachineOperand &Op = Inst->getOperand(i);
01692       if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
01693         Inst->RemoveOperand(i);
01694     }
01695 
01696     if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
01697       // We are converting these to a BFE, so we need to add the missing
01698       // operands for the size and offset.
01699       unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
01700       Inst->addOperand(MachineOperand::CreateImm(0));
01701       Inst->addOperand(MachineOperand::CreateImm(Size));
01702 
01703     } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
01704       // The VALU version adds the second operand to the result, so insert an
01705       // extra 0 operand.
01706       Inst->addOperand(MachineOperand::CreateImm(0));
01707     }
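    // For example, S_SEXT_I32_I8 turns into a bitfield extract of 8 bits
    // starting at bit 0, and the 32-bit popcount gains a trailing zero operand
    // because its VALU form computes popcount(src0) + src1.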
01708 
01709     addDescImplicitUseDef(NewDesc, Inst);
01710 
01711     if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
01712       const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
01713       // If we need to move this to VGPRs, we need to unpack the packed second
01714       // operand back into the two separate operands for bit offset and width.
01715       assert(OffsetWidthOp.isImm() &&
01716              "Scalar BFE is only implemented for constant width and offset");
01717       uint32_t Imm = OffsetWidthOp.getImm();
01718 
01719       uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
01720       uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
01721       Inst->RemoveOperand(2); // Remove old immediate.
01722       Inst->addOperand(MachineOperand::CreateImm(Offset));
01723       Inst->addOperand(MachineOperand::CreateImm(BitWidth));
01724     }
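    // For example, a scalar BFE whose packed operand is 0x00080004 is expanded
    // to offset = 4 and width = 8, i.e. it extracts bits [11:4] of the source.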
01725 
01726     // Update the destination register class.
01727 
01728     const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);
01729 
01730     switch (Opcode) {
01731       // For COPY, PHI, REG_SEQUENCE and INSERT_SUBREG, getOpRegClass just
01732       // returns the current virtual register class of the operand, so we need
01733       // to find an equivalent VGPR register class in order to move the
01734       // instruction to the VALU.
01735     case AMDGPU::COPY:
01736     case AMDGPU::PHI:
01737     case AMDGPU::REG_SEQUENCE:
01738     case AMDGPU::INSERT_SUBREG:
01739       if (RI.hasVGPRs(NewDstRC))
01740         continue;
01741       NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
01742       if (!NewDstRC)
01743         continue;
01744       break;
01745     default:
01746       break;
01747     }
01748 
01749     unsigned DstReg = Inst->getOperand(0).getReg();
01750     unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
01751     MRI.replaceRegWith(DstReg, NewDstReg);
01752 
01753     // Legalize the operands
01754     legalizeOperands(Inst);
01755 
01756     for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
01757            E = MRI.use_end(); I != E; ++I) {
01758       MachineInstr &UseMI = *I->getParent();
01759       if (!canReadVGPR(UseMI, I.getOperandNo())) {
01760         Worklist.push_back(&UseMI);
01761       }
01762     }
01763   }
01764 }
01765 
01766 //===----------------------------------------------------------------------===//
01767 // Indirect addressing callbacks
01768 //===----------------------------------------------------------------------===//
01769 
01770 unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
01771                                                  unsigned Channel) const {
01772   assert(Channel == 0);
01773   return RegIndex;
01774 }
01775 
01776 const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
01777   return &AMDGPU::VReg_32RegClass;
01778 }
01779 
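/// \brief Expand a 64-bit scalar unary operation into two 32-bit \p Opcode
/// instructions, one per half of the source, and recombine the results with a
/// REG_SEQUENCE.  This is how S_NOT_B64 is lowered on the way to the VALU;
/// the two new instructions are pushed onto \p Worklist so moveToVALU()
/// processes them next.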
01780 void SIInstrInfo::splitScalar64BitUnaryOp(
01781   SmallVectorImpl<MachineInstr *> &Worklist,
01782   MachineInstr *Inst,
01783   unsigned Opcode) const {
01784   MachineBasicBlock &MBB = *Inst->getParent();
01785   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
01786 
01787   MachineOperand &Dest = Inst->getOperand(0);
01788   MachineOperand &Src0 = Inst->getOperand(1);
01789   DebugLoc DL = Inst->getDebugLoc();
01790 
01791   MachineBasicBlock::iterator MII = Inst;
01792 
01793   const MCInstrDesc &InstDesc = get(Opcode);
01794   const TargetRegisterClass *Src0RC = Src0.isReg() ?
01795     MRI.getRegClass(Src0.getReg()) :
01796     &AMDGPU::SGPR_32RegClass;
01797 
01798   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
01799 
01800   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
01801                                                        AMDGPU::sub0, Src0SubRC);
01802 
01803   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
01804   const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
01805 
01806   unsigned DestSub0 = MRI.createVirtualRegister(DestSubRC);
01807   MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
01808     .addOperand(SrcReg0Sub0);
01809 
01810   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
01811                                                        AMDGPU::sub1, Src0SubRC);
01812 
01813   unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
01814   MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
01815     .addOperand(SrcReg0Sub1);
01816 
01817   unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
01818   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
01819     .addReg(DestSub0)
01820     .addImm(AMDGPU::sub0)
01821     .addReg(DestSub1)
01822     .addImm(AMDGPU::sub1);
01823 
01824   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
01825 
01826   // Try to legalize the operands in case we need to swap the order to keep it
01827   // valid.
01828   Worklist.push_back(LoHalf);
01829   Worklist.push_back(HiHalf);
01830 }
01831 
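/// \brief Expand a 64-bit scalar binary operation into two 32-bit \p Opcode
/// instructions operating on the sub0 and sub1 halves of both sources, then
/// recombine the results with a REG_SEQUENCE.  This is how S_AND_B64,
/// S_OR_B64 and S_XOR_B64 are lowered on the way to the VALU; the two new
/// instructions are pushed onto \p Worklist for further processing.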
01832 void SIInstrInfo::splitScalar64BitBinaryOp(
01833   SmallVectorImpl<MachineInstr *> &Worklist,
01834   MachineInstr *Inst,
01835   unsigned Opcode) const {
01836   MachineBasicBlock &MBB = *Inst->getParent();
01837   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
01838 
01839   MachineOperand &Dest = Inst->getOperand(0);
01840   MachineOperand &Src0 = Inst->getOperand(1);
01841   MachineOperand &Src1 = Inst->getOperand(2);
01842   DebugLoc DL = Inst->getDebugLoc();
01843 
01844   MachineBasicBlock::iterator MII = Inst;
01845 
01846   const MCInstrDesc &InstDesc = get(Opcode);
01847   const TargetRegisterClass *Src0RC = Src0.isReg() ?
01848     MRI.getRegClass(Src0.getReg()) :
01849     &AMDGPU::SGPR_32RegClass;
01850 
01851   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
01852   const TargetRegisterClass *Src1RC = Src1.isReg() ?
01853     MRI.getRegClass(Src1.getReg()) :
01854     &AMDGPU::SGPR_32RegClass;
01855 
01856   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
01857 
01858   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
01859                                                        AMDGPU::sub0, Src0SubRC);
01860   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
01861                                                        AMDGPU::sub0, Src1SubRC);
01862 
01863   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
01864   const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
01865 
01866   unsigned DestSub0 = MRI.createVirtualRegister(DestSubRC);
01867   MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
01868     .addOperand(SrcReg0Sub0)
01869     .addOperand(SrcReg1Sub0);
01870 
01871   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
01872                                                        AMDGPU::sub1, Src0SubRC);
01873   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
01874                                                        AMDGPU::sub1, Src1SubRC);
01875 
01876   unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
01877   MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
01878     .addOperand(SrcReg0Sub1)
01879     .addOperand(SrcReg1Sub1);
01880 
01881   unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
01882   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
01883     .addReg(DestSub0)
01884     .addImm(AMDGPU::sub0)
01885     .addReg(DestSub1)
01886     .addImm(AMDGPU::sub1);
01887 
01888   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
01889 
01890   // Try to legalize the operands in case we need to swap the order to keep it
01891   // valid.
01892   Worklist.push_back(LoHalf);
01893   Worklist.push_back(HiHalf);
01894 }
01895 
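/// \brief Expand S_BCNT1_I32_B64 into two V_BCNT_U32_B32_e32 instructions.
///
/// The VALU popcount accumulates into its second operand, so the 64-bit count
/// is formed as popcount(src.sub1) + (popcount(src.sub0) + 0), with the first
/// result fed into the second instruction as the accumulator.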
01896 void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
01897                                        MachineInstr *Inst) const {
01898   MachineBasicBlock &MBB = *Inst->getParent();
01899   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
01900 
01901   MachineBasicBlock::iterator MII = Inst;
01902   DebugLoc DL = Inst->getDebugLoc();
01903 
01904   MachineOperand &Dest = Inst->getOperand(0);
01905   MachineOperand &Src = Inst->getOperand(1);
01906 
01907   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e32);
01908   const TargetRegisterClass *SrcRC = Src.isReg() ?
01909     MRI.getRegClass(Src.getReg()) :
01910     &AMDGPU::SGPR_32RegClass;
01911 
01912   unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
01913   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
01914 
01915   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
01916 
01917   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
01918                                                       AMDGPU::sub0, SrcSubRC);
01919   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
01920                                                       AMDGPU::sub1, SrcSubRC);
01921 
01922   MachineInstr *First = BuildMI(MBB, MII, DL, InstDesc, MidReg)
01923     .addOperand(SrcRegSub0)
01924     .addImm(0);
01925 
01926   MachineInstr *Second = BuildMI(MBB, MII, DL, InstDesc, ResultReg)
01927     .addOperand(SrcRegSub1)
01928     .addReg(MidReg);
01929 
01930   MRI.replaceRegWith(Dest.getReg(), ResultReg);
01931 
01932   Worklist.push_back(First);
01933   Worklist.push_back(Second);
01934 }
01935 
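/// \brief Append the implicit uses and defs listed in \p NewDesc to \p Inst
/// as implicit register operands, for example the implicit VCC use/def that
/// many VALU encodings carry; setDesc() alone does not add them.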
01936 void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
01937                                         MachineInstr *Inst) const {
01938   // Add the implicit register uses and definitions.
01939   if (NewDesc.ImplicitUses) {
01940     for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
01941       unsigned Reg = NewDesc.ImplicitUses[i];
01942       Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
01943     }
01944   }
01945 
01946   if (NewDesc.ImplicitDefs) {
01947     for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
01948       unsigned Reg = NewDesc.ImplicitDefs[i];
01949       Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
01950     }
01951   }
01952 }
01953 
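/// \brief Emit the SI_INDIRECT_DST_V1 pseudo that writes \p ValueReg into the
/// vector register selected at run time by \p OffsetReg, using the first
/// register of the function's reserved indirect range as the base.  The
/// matching read below emits the SI_INDIRECT_SRC pseudo.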
01954 MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
01955                                    MachineBasicBlock *MBB,
01956                                    MachineBasicBlock::iterator I,
01957                                    unsigned ValueReg,
01958                                    unsigned Address, unsigned OffsetReg) const {
01959   const DebugLoc &DL = MBB->findDebugLoc(I);
01960   unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
01961                                       getIndirectIndexBegin(*MBB->getParent()));
01962 
01963   return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
01964           .addReg(IndirectBaseReg, RegState::Define)
01965           .addOperand(I->getOperand(0))
01966           .addReg(IndirectBaseReg)
01967           .addReg(OffsetReg)
01968           .addImm(0)
01969           .addReg(ValueReg);
01970 }
01971 
01972 MachineInstrBuilder SIInstrInfo::buildIndirectRead(
01973                                    MachineBasicBlock *MBB,
01974                                    MachineBasicBlock::iterator I,
01975                                    unsigned ValueReg,
01976                                    unsigned Address, unsigned OffsetReg) const {
01977   const DebugLoc &DL = MBB->findDebugLoc(I);
01978   unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
01979                                       getIndirectIndexBegin(*MBB->getParent()));
01980 
01981   return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
01982           .addOperand(I->getOperand(0))
01983           .addOperand(I->getOperand(1))
01984           .addReg(IndirectBaseReg)
01985           .addReg(OffsetReg)
01986           .addImm(0);
01987 
01988 }
01989 
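/// \brief Reserve every VGPR tuple that overlaps the function's indirect
/// addressing range [Begin, End].  Wider tuples that start below Begin but
/// extend into the range must be reserved as well, which is why each wider
/// class starts progressively earlier (Begin - 1 for 64-bit pairs, Begin - 3
/// for 128-bit quads, and so on).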
01990 void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
01991                                             const MachineFunction &MF) const {
01992   int End = getIndirectIndexEnd(MF);
01993   int Begin = getIndirectIndexBegin(MF);
01994 
01995   if (End == -1)
01996     return;
01997 
01998 
01999   for (int Index = Begin; Index <= End; ++Index)
02000     Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));
02001 
02002   for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
02003     Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
02004 
02005   for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
02006     Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
02007 
02008   for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
02009     Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
02010 
02011   for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
02012     Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
02013 
02014   for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
02015     Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
02016 }
02017 
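/// \brief Return a pointer to the operand of \p MI named \p OperandName, or
/// nullptr when the opcode has no such operand.  Typical use, as seen in
/// legalizeOperands() above: getNamedOperand(*MI, AMDGPU::OpName::vaddr).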
02018 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
02019                                                    unsigned OperandName) const {
02020   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
02021   if (Idx == -1)
02022     return nullptr;
02023 
02024   return &MI.getOperand(Idx);
02025 }