LLVM API Documentation

SIRegisterInfo.cpp
Go to the documentation of this file.
00001 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 /// \file
00011 /// \brief SI implementation of the TargetRegisterInfo class.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 
00016 #include "SIRegisterInfo.h"
00017 #include "AMDGPUSubtarget.h"
00018 #include "SIInstrInfo.h"
00019 #include "SIMachineFunctionInfo.h"
00020 #include "llvm/CodeGen/MachineFrameInfo.h"
00021 #include "llvm/CodeGen/MachineInstrBuilder.h"
00022 #include "llvm/CodeGen/RegisterScavenging.h"
00023 #include "llvm/IR/Function.h"
00024 #include "llvm/IR/LLVMContext.h"
00025 
00026 using namespace llvm;
00027 
// Construct the SI register info; all state lives in the AMDGPU base class,
// which keeps a reference to the subtarget.
SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
: AMDGPURegisterInfo(st)
  { }
00031 
00032 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
00033   BitVector Reserved(getNumRegs());
00034   Reserved.set(AMDGPU::EXEC);
00035   Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
00036   Reserved.set(AMDGPU::FLAT_SCR);
00037   return Reserved;
00038 }
00039 
// Register-pressure limit for RC: simply the total number of registers in
// the class (no per-function reduction is applied).
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  return RC->getNumRegs();
}
00044 
// Scavenging is only needed when the function has stack objects, i.e. when
// eliminateFrameIndex may have to materialize a frame offset in a register.
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}
00048 
00049 static unsigned getNumSubRegsForSpillOp(unsigned Op) {
00050 
00051   switch (Op) {
00052   case AMDGPU::SI_SPILL_S512_SAVE:
00053   case AMDGPU::SI_SPILL_S512_RESTORE:
00054     return 16;
00055   case AMDGPU::SI_SPILL_S256_SAVE:
00056   case AMDGPU::SI_SPILL_S256_RESTORE:
00057     return 8;
00058   case AMDGPU::SI_SPILL_S128_SAVE:
00059   case AMDGPU::SI_SPILL_S128_RESTORE:
00060     return 4;
00061   case AMDGPU::SI_SPILL_S64_SAVE:
00062   case AMDGPU::SI_SPILL_S64_RESTORE:
00063     return 2;
00064   case AMDGPU::SI_SPILL_S32_SAVE:
00065   case AMDGPU::SI_SPILL_S32_RESTORE:
00066     return 1;
00067   default: llvm_unreachable("Invalid spill opcode");
00068   }
00069 }
00070 
// Rewrite a frame-index operand in MI into something the hardware can use.
// Three cases are handled:
//   * SGPR spill pseudos  -> expanded to V_WRITELANE_B32 per sub-register,
//     storing each 32-bit SGPR into a lane of a spill VGPR.
//   * SGPR restore pseudos -> expanded to V_READLANE_B32 per sub-register.
//   * Anything else        -> the frame index is folded to its byte offset,
//     materialized into a scavenged VGPR if it is not a legal immediate.
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                        int SPAdj, unsigned FIOperandNum,
                                        RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      // Write each 32-bit sub-register of the spilled SGPR into the lane of
      // the VGPR that was assigned to this (frame index, sub-register) pair.
      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        // NOTE(review): after emitError execution continues and the
        // WRITELANE below is still emitted with NoRegister — presumably
        // emitError aborts or poisons compilation; confirm.
        if (Spill.VGPR == AMDGPU::NoRegister) {
           LLVMContext &Ctx = MF->getFunction()->getContext();
           Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
                .addReg(SubReg)
                .addImm(Spill.Lane);

      }
      // The pseudo is fully expanded; remove it.
      MI->eraseFromParent();
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      // Read each 32-bit sub-register back out of its spill-VGPR lane.
      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
           LLVMContext &Ctx = MF->getFunction()->getContext();
           Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
                .addReg(Spill.VGPR)
                .addImm(Spill.Lane);

      }
      // NOPs after the readlanes — presumably to cover the latency before
      // the restored SGPRs may be consumed (hardware hazard); TODO confirm
      // why exactly 3.
      TII->insertNOPs(MI, 3);
      MI->eraseFromParent();
      break;
    }

    default: {
      // Plain frame index: fold the object's byte offset directly into the
      // operand, or move it into a scavenged VGPR when the instruction
      // cannot encode it as an immediate.
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false);
      }
    }
  }
}
00155 
00156 const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
00157                                                                    MVT VT) const {
00158   switch(VT.SimpleTy) {
00159     default:
00160     case MVT::i32: return &AMDGPU::VReg_32RegClass;
00161   }
00162 }
00163 
// Hardware register index: the low 8 bits of the register's encoding value.
unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}
00167 
00168 const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
00169   assert(!TargetRegisterInfo::isVirtualRegister(Reg));
00170 
00171   const TargetRegisterClass *BaseClasses[] = {
00172     &AMDGPU::VReg_32RegClass,
00173     &AMDGPU::SReg_32RegClass,
00174     &AMDGPU::VReg_64RegClass,
00175     &AMDGPU::SReg_64RegClass,
00176     &AMDGPU::SReg_128RegClass,
00177     &AMDGPU::SReg_256RegClass
00178   };
00179 
00180   for (const TargetRegisterClass *BaseClass : BaseClasses) {
00181     if (BaseClass->contains(Reg)) {
00182       return BaseClass;
00183     }
00184   }
00185   return nullptr;
00186 }
00187 
00188 bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const {
00189   if (!RC) {
00190     return false;
00191   }
00192   return !hasVGPRs(RC);
00193 }
00194 
00195 bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
00196   return getCommonSubClass(&AMDGPU::VReg_32RegClass, RC) ||
00197          getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
00198          getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
00199          getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
00200          getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
00201          getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
00202 }
00203 
00204 const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
00205                                          const TargetRegisterClass *SRC) const {
00206     if (hasVGPRs(SRC)) {
00207       return SRC;
00208     } else if (SRC == &AMDGPU::SCCRegRegClass) {
00209       return &AMDGPU::VCCRegRegClass;
00210     } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
00211       return &AMDGPU::VReg_32RegClass;
00212     } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
00213       return &AMDGPU::VReg_64RegClass;
00214     } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
00215       return &AMDGPU::VReg_128RegClass;
00216     } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
00217       return &AMDGPU::VReg_256RegClass;
00218     } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
00219       return &AMDGPU::VReg_512RegClass;
00220     }
00221     return nullptr;
00222 }
00223 
00224 const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
00225                          const TargetRegisterClass *RC, unsigned SubIdx) const {
00226   if (SubIdx == AMDGPU::NoSubRegister)
00227     return RC;
00228 
00229   // If this register has a sub-register, we can safely assume it is a 32-bit
00230   // register, because all of SI's sub-registers are 32-bit.
00231   if (isSGPRClass(RC)) {
00232     return &AMDGPU::SGPR_32RegClass;
00233   } else {
00234     return &AMDGPU::VGPR_32RegClass;
00235   }
00236 }
00237 
00238 unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
00239                                           const TargetRegisterClass *SubRC,
00240                                           unsigned Channel) const {
00241 
00242   switch (Reg) {
00243     case AMDGPU::VCC:
00244       switch(Channel) {
00245         case 0: return AMDGPU::VCC_LO;
00246         case 1: return AMDGPU::VCC_HI;
00247         default: llvm_unreachable("Invalid SubIdx for VCC");
00248       }
00249       break;
00250 
00251   case AMDGPU::FLAT_SCR:
00252     switch (Channel) {
00253     case 0:
00254       return AMDGPU::FLAT_SCR_LO;
00255     case 1:
00256       return AMDGPU::FLAT_SCR_HI;
00257     default:
00258       llvm_unreachable("Invalid SubIdx for FLAT_SCR");
00259     }
00260     break;
00261 
00262   case AMDGPU::EXEC:
00263     switch (Channel) {
00264     case 0:
00265       return AMDGPU::EXEC_LO;
00266     case 1:
00267       return AMDGPU::EXEC_HI;
00268     default:
00269       llvm_unreachable("Invalid SubIdx for EXEC");
00270     }
00271     break;
00272   }
00273 
00274   unsigned Index = getHWRegIndex(Reg);
00275   return SubRC->getRegister(Index + Channel);
00276 }
00277 
00278 bool SIRegisterInfo::regClassCanUseImmediate(int RCID) const {
00279   switch (RCID) {
00280   default: return false;
00281   case AMDGPU::SSrc_32RegClassID:
00282   case AMDGPU::SSrc_64RegClassID:
00283   case AMDGPU::VSrc_32RegClassID:
00284   case AMDGPU::VSrc_64RegClassID:
00285     return true;
00286   }
00287 }
00288 
// Convenience overload: forwards to the ID-based check above.
bool SIRegisterInfo::regClassCanUseImmediate(
                             const TargetRegisterClass *RC) const {
  return regClassCanUseImmediate(RC->getID());
}
00293 
00294 unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
00295                                            enum PreloadedValue Value) const {
00296 
00297   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
00298   switch (Value) {
00299   case SIRegisterInfo::TGID_X:
00300     return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
00301   case SIRegisterInfo::TGID_Y:
00302     return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
00303   case SIRegisterInfo::TGID_Z:
00304     return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
00305   case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
00306     return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
00307   case SIRegisterInfo::SCRATCH_PTR:
00308     return AMDGPU::SGPR2_SGPR3;
00309   }
00310   llvm_unreachable("unexpected preloaded value type");
00311 }