//===-- AArch64AdvSIMDScalar.cpp - AdvSIMD scalar operation optimization -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// When profitable, replace GPR targeting i64 instructions with their
// AdvSIMD scalar equivalents. Generally speaking, "profitable" is defined
// as minimizing the number of cross-class register copies.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// TODO: Graph based predicate heuristics.
// Walking the instruction list linearly will get many, perhaps most, of
// the cases, but to do a truly thorough job of this, we need a more
// holistic approach.
//
// This optimization is very similar in spirit to the register allocator's
// spill placement, only here we're determining where to place cross-class
// register copies rather than spills. As such, a similar approach is
// called for.
//
// We want to build up a set of graphs of all instructions which are candidates
// for transformation along with instructions which generate their inputs and
// consume their outputs. For each edge in the graph, we assign a weight
// based on whether there is a copy required there (weight zero if not) and
// the block frequency of the block containing the defining or using
// instruction, whichever is less. Our optimization is then a graph problem
// to minimize the total weight of all the graphs, then transform instructions
// and add or remove copy instructions as called for to implement the
// solution.
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-simd-scalar"
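
// An illustrative sketch of the transform (hand-written, not actual pass
// output; the register assignments are hypothetical): for "add x2, x0, x1"
// (ADDXrr), the AdvSIMD scalar equivalent from the opcode table below is
// ADDv1i64, giving roughly:
//
//   fmov d0, x0        // cross-class copy GPR64 -> FPR64
//   fmov d1, x1
//   add  d2, d0, d1    // ADDv1i64 in place of ADDXrr
//   fmov x2, d2        // copy the result back out to a GPR
//
// In isolation this is a net loss (three copies added). It pays off when the
// inputs already live in FPR64 registers and/or the result feeds FPR64 users,
// so the copies above cancel against copies that already existed.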

// Allow forcing all i64 operations with equivalent SIMD instructions to use
// them. For stress-testing the transformation function.
static cl::opt<bool>
TransformAll("aarch64-simd-scalar-force-all",
             cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
             cl::init(false), cl::Hidden);

STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");

namespace {
class AArch64AdvSIMDScalar : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const AArch64InstrInfo *TII;

private:
  // isProfitableToTransform - Predicate function to determine whether an
  // instruction should be transformed to its equivalent AdvSIMD scalar
  // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
  bool isProfitableToTransform(const MachineInstr *MI) const;

  // transformInstruction - Perform the transformation of an instruction
  // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
  // to be the correct register class, minimizing cross-class copies.
  void transformInstruction(MachineInstr *MI);

  // processMachineBasicBlock - Main optimization loop.
  bool processMachineBasicBlock(MachineBasicBlock *MBB);

public:
  static char ID; // Pass identification, replacement for typeid.
  explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &F) override;

  const char *getPassName() const override {
    return "AdvSIMD Scalar Operation Optimization";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char AArch64AdvSIMDScalar::ID = 0;
} // end anonymous namespace

static bool isGPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (SubReg)
    return false;
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
  return AArch64::GPR64RegClass.contains(Reg);
}

static bool isFPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
            SubReg == 0) ||
           (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
            SubReg == AArch64::dsub);
  // Physical register references just check the register class directly.
  return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
         (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
}
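
// For orientation, an informal sketch (register names illustrative) of the
// cross-class copy idioms the next helper recognizes:
//
//   FMOV Xd, Dn / FMOV Dd, Xn  - the typical GPR64 <-> FPR64 moves
//   UMOV Xd, Vn.d[0]           - a lane-zero extract, equivalent to an FMOV
//   a plain COPY to/from an FPR64, either directly or as a dsub subregister
//   reference to an FPR128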

// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
// copy instruction. Return zero_reg if the instruction is not a copy.
static unsigned getSrcFromCopy(const MachineInstr *MI,
                               const MachineRegisterInfo *MRI,
                               unsigned &SubReg) {
  SubReg = 0;
  // The "FMOV Xd, Dn" instruction is the typical form.
  if (MI->getOpcode() == AArch64::FMOVDXr ||
      MI->getOpcode() == AArch64::FMOVXDr)
    return MI->getOperand(1).getReg();
  // A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
  // these at this stage, but it's easy to check for.
  if (MI->getOpcode() == AArch64::UMOVvi64 &&
      MI->getOperand(2).getImm() == 0) {
    SubReg = AArch64::dsub;
    return MI->getOperand(1).getReg();
  }
  // Or just a plain COPY instruction. This can be directly to/from FPR64,
  // or it can be a dsub subreg reference to an FPR128.
  if (MI->getOpcode() == AArch64::COPY) {
    if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
                MRI))
      return MI->getOperand(1).getReg();
    if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
                MRI)) {
      SubReg = MI->getOperand(1).getSubReg();
      return MI->getOperand(1).getReg();
    }
  }

  // Otherwise, this is some other kind of instruction.
  return 0;
}

// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
// that we're considering transforming to, return that AdvSIMD opcode. For all
// others, return the original opcode.
static int getTransformOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  // FIXME: Lots more possibilities.
  case AArch64::ADDXrr:
    return AArch64::ADDv1i64;
  case AArch64::SUBXrr:
    return AArch64::SUBv1i64;
  case AArch64::ANDXrr:
    return AArch64::ANDv8i8;
  case AArch64::EORXrr:
    return AArch64::EORv8i8;
  case AArch64::ORRXrr:
    return AArch64::ORRv8i8;
  }
  // No AdvSIMD equivalent, so just return the original opcode.
  return Opc;
}

static bool isTransformable(const MachineInstr *MI) {
  int Opc = MI->getOpcode();
  return Opc != getTransformOpcode(Opc);
}

// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
bool
AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
  // If this instruction isn't eligible to be transformed (no SIMD equivalent),
  // early exit since that's the common case.
  if (!isTransformable(MI))
    return false;

  // Count the number of copies we'll need to add and approximate the number
  // of copies that a transform will enable us to remove.
  unsigned NumNewCopies = 3;
  unsigned NumRemovableCopies = 0;

  unsigned OrigSrc0 = MI->getOperand(1).getReg();
  unsigned OrigSrc1 = MI->getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If the source was from a copy, we don't need to insert a new copy.
    if (Src0)
      --NumNewCopies;
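    // (Note the accounting: we start charged with NumNewCopies = 3 -- one
    // copy per source operand plus one for the result -- and each source
    // that is already produced by a cross-class copy, like Src0 here,
    // cancels one of those charges.)
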
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0))
      ++NumRemovableCopies;
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    if (Src1)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1))
      ++NumRemovableCopies;
  }

  // If any of the uses of the original instruction is a cross class copy,
  // that's a copy that will be removable if we transform. Likewise, if
  // any of the uses is a transformable instruction, it's likely the transforms
  // will chain, enabling us to save a copy there, too. This is an aggressive
  // heuristic that approximates the graph based cost analysis described above.
  unsigned Dst = MI->getOperand(0).getReg();
  bool AllUsesAreCopies = true;
  for (MachineRegisterInfo::use_instr_nodbg_iterator
           Use = MRI->use_instr_nodbg_begin(Dst),
           E = MRI->use_instr_nodbg_end();
       Use != E; ++Use) {
    unsigned SubReg;
    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
      ++NumRemovableCopies;
    // If the use is an INSERT_SUBREG, that's still something that can
    // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
    // preferable to have it use the FPR64 in most cases, as if the source
    // vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
    // Ditto for a lane insert.
    else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
             Use->getOpcode() == AArch64::INSvi64gpr)
      ;
    else
      AllUsesAreCopies = false;
  }
  // If all of the uses of the original destination register are copies to
  // FPR64, then we won't end up having a new copy back to GPR64 either.
  if (AllUsesAreCopies)
    --NumNewCopies;

  // If a transform will not increase the number of cross-class copies
  // required, return true.
  if (NumNewCopies <= NumRemovableCopies)
    return true;

  // Finally, even if we otherwise wouldn't transform, check if we're forcing
  // transformation of everything.
  return TransformAll;
}

static MachineInstr *insertCopy(const AArch64InstrInfo *TII, MachineInstr *MI,
                                unsigned Dst, unsigned Src, bool IsKill) {
  MachineInstrBuilder MIB =
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
              Dst)
          .addReg(Src, getKillRegState(IsKill));
  DEBUG(dbgs() << " adding copy: " << *MIB);
  ++NumCopiesInserted;
  return MIB;
}

// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
  DEBUG(dbgs() << "Scalar transform: " << *MI);

  MachineBasicBlock *MBB = MI->getParent();
  int OldOpc = MI->getOpcode();
  int NewOpc = getTransformOpcode(OldOpc);
  assert(OldOpc != NewOpc && "transform an instruction to itself?!");
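
  // In outline: reuse the FPR64 sources of any feeding cross-class copies
  // (erasing copies that become dead), insert new GPR64 -> FPR64 copies for
  // sources we cannot forward, build the replacement AdvSIMD scalar
  // instruction into a fresh FPR64 vreg, copy the result back to the original
  // GPR64 destination, and finally erase the old instruction.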

  // Check if we need a copy for the source registers.
  unsigned OrigSrc0 = MI->getOperand(1).getReg();
  unsigned OrigSrc1 = MI->getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0)) {
      assert(Src0 && "Can't delete copy w/o a valid original source!");
      Def->eraseFromParent();
      ++NumCopiesDeleted;
    }
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1)) {
      assert(Src1 && "Can't delete copy w/o a valid original source!");
      Def->eraseFromParent();
      ++NumCopiesDeleted;
    }
  }
  // If we weren't able to reference the original source directly, create a
  // copy.
  if (!Src0) {
    SubReg0 = 0;
    Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src0, OrigSrc0, true);
  }
  if (!Src1) {
    SubReg1 = 0;
    Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src1, OrigSrc1, true);
  }

  // Create a vreg for the destination.
  // FIXME: No need to do this if the ultimate user expects an FPR64.
  // Check for that and avoid the copy if possible.
  unsigned Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);

  // For now, all of the new instructions have the same simple three-register
  // form, so no need to special case based on what instruction we're
  // building.
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
      .addReg(Src0, getKillRegState(true), SubReg0)
      .addReg(Src1, getKillRegState(true), SubReg1);

  // Now copy the result back out to a GPR.
  // FIXME: Try to avoid this if all uses could actually just use the FPR64
  // directly.
  insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);

  // Erase the old instruction.
  MI->eraseFromParent();

  ++NumScalarInsnsUsed;
}

// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr *MI = I;
    ++I;
    if (isProfitableToTransform(MI)) {
      transformInstruction(MI);
      Changed = true;
    }
  }
  return Changed;
}
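
// Note that processMachineBasicBlock advances the iterator before calling
// transformInstruction, since the transform erases MI from its parent block.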

// runOnMachineFunction - Pass entry point from PassManager.
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
  bool Changed = false;
  DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");

  const TargetMachine &TM = mf.getTarget();
  MRI = &mf.getRegInfo();
  TII = static_cast<const AArch64InstrInfo *>(
      TM.getSubtargetImpl()->getInstrInfo());

  // Just check things on a one-block-at-a-time basis.
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
    if (processMachineBasicBlock(I))
      Changed = true;
  return Changed;
}

// createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
// to add the pass to the PassManager.
FunctionPass *llvm::createAArch64AdvSIMDScalar() {
  return new AArch64AdvSIMDScalar();
}
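
// A usage sketch, assumed from the factory comment above rather than shown in
// this file: the target's pass configuration would register the pass with
// something like addPass(createAArch64AdvSIMDScalar()). The hidden
// -aarch64-simd-scalar-force-all option defined at the top of this file can
// then be used from llc to stress-test the transform on every eligible
// instruction regardless of profitability.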