//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "t2-reduce-size"

STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,  "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,   "Number of 32-bit load / store reduced to 16-bit ones");

static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);

namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode
    uint16_t NarrowOpc1;   // Narrow opcode to transform to
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
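                           // PredCC2 below uses the same 0/1/2 encoding, but
                           // applies to the two-address (NarrowOpc2) form.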
    unsigned PredCC2  : 2;
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };

  static const ReduceEntry ReduceTable[] = {
  // Wide,        Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
  { ARM::t2ADCrr, 0,            ARM::tADC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2ADDri, ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  0,0, 0,1,0 },
  { ARM::t2ADDrr, ARM::tADDrr,  ARM::tADDhirr, 0,   0,   1,   0,  0,1, 0,0,0 },
  { ARM::t2ADDSri,ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  2,2, 0,1,0 },
  { ARM::t2ADDSrr,ARM::tADDrr,  0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2ANDrr, 0,            ARM::tAND,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2ASRri, ARM::tASRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2ASRrr, 0,            ARM::tASRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2BICrr, 0,            ARM::tBIC,     0,   0,   0,   1,  0,0, 1,0,0 },
  // FIXME: Disable CMN, as CCodes are backwards from compare expectations.
  //{ ARM::t2CMNrr, ARM::tCMN,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMNzrr,ARM::tCMNz,   0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPri, ARM::tCMPi8,  0,             8,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPrr, ARM::tCMPhir, 0,             0,   0,   0,   0,  2,0, 0,1,0 },
  { ARM::t2EORrr, 0,            ARM::tEOR,     0,   0,   0,   1,  0,0, 1,0,0 },
  // FIXME: adr.n immediate offset must be multiple of 4.
  //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,   0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2LSLri, ARM::tLSLri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSLrr, 0,            ARM::tLSLrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2LSRri, ARM::tLSRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSRrr, 0,            ARM::tLSRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2MOVi,  ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,0,0 },
  { ARM::t2MOVi16,ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,1,0 },
  // FIXME: Do we need the 16-bit 'S' variant?
  { ARM::t2MOVr,  ARM::tMOVr,   0,             0,   0,   0,   0,  1,0, 0,0,0 },
  { ARM::t2MUL,   0,            ARM::tMUL,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2MVNr,  ARM::tMVN,    0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2ORRrr, 0,            ARM::tORR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2REV,   ARM::tREV,    0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REV16, ARM::tREV16,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REVSH, ARM::tREVSH,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2RORrr, 0,            ARM::tROR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2RSBri, ARM::tRSB,    0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2RSBSri,ARM::tRSB,    0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2SBCrr, 0,            ARM::tSBC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2SUBri, ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  0,0, 0,0,0 },
  { ARM::t2SUBrr, ARM::tSUBrr,  0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2SUBSri,ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  2,2, 0,0,0 },
  { ARM::t2SUBSrr,ARM::tSUBrr,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2SXTB,  ARM::tSXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2SXTH,  ARM::tSXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2UXTB,  ARM::tUXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2UXTH,  ARM::tUXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },

  // FIXME: Clean this up after splitting each Thumb load / store opcode
  // into multiple ones.
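  // Note: for the load / store entries below, NarrowOpc2 and Imm2Limit do not
  // describe a two-address form; ReduceLoadStore reuses them for the
  // SP-relative encodings (tLDRspi / tSTRspi, tPOP / tPUSH).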
  { ARM::t2LDRi12,ARM::tLDRi,   ARM::tLDRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRs,  ARM::tLDRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBi12,ARM::tLDRBi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBs, ARM::tLDRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHi12,ARM::tLDRHi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHs, ARM::tLDRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSBs,ARM::tLDRSB,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSHs,ARM::tLDRSH,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRi12,ARM::tSTRi,   ARM::tSTRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRs,  ARM::tSTRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBi12,ARM::tSTRBi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBs, ARM::tSTRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHi12,ARM::tSTRHi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHs, ARM::tSTRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },

  { ARM::t2LDMIA, ARM::tLDMIA,  0,             0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2LDMIA_RET,0,         ARM::tPOP_RET, 0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0,   0,   1,   1,  1,1, 0,1,0 },
  // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
  { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0,       0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0,        ARM::tPUSH,    0,   0,   1,   1,  1,1, 0,1,0 }
  };

  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce();

    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    bool runOnMachineFunction(MachineFunction &MF) override;

    const char *getPassName() const override {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;
    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR;
      // Has this block been visited yet?
      bool Visited;

      MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
    };

    SmallVector<MBBInfo, 8> BlockInfo;
  };
  char Thumb2SizeReduce::ID = 0;
}

Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      assert(false && "Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch (Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}

/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the ability of the out-of-order execution engine
/// to do register renaming magic.
/// This function checks if there is a read-after-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If
/// there is, then there is no harm done since the instruction cannot be
/// retired before the CPSR-setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of
/// compile time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
///    = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
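  // (Both t2MOVi and t2MOVi16 narrow to tMOVi8; see ReduceTable above.)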
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    // Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If the old opcode does not implicitly define CPSR, then it's not ok,
    // since the new opcode's CPSR def is not meant to be thrown away
    // (e.g. CMP).
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA ||
                 Opc == ARM::t2LDMDB || Opc == ARM::t2LDMIA_UPD ||
                 Opc == ARM::t2LDMDB_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
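  // Operand layout of the wide (non-multiple) loads / stores handled below:
  // 0 = transfer register, 1 = base register, 2 = immediate offset or offset
  // register, 3 = shift amount for the register forms, then the predicate.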
  uint8_t ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
      HasOffReg = false;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    if (!isARMLowRegister(BaseReg) || Entry.WideOpc != ARM::t2LDMIA)
      return false;

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 4; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // register, the predicate must be "always", and the instruction must not
    // set the condition flags.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoad() || MI->mayStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1, 2,0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {

  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
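    // Commuting helps only if it moves the destination register into the
    // tied source slot (operand 1), so require that one of the commutable
    // operands is operand 1 and the other currently holds Reg0.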
    unsigned CommOpIdx1, CommOpIdx2;
    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
        CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI = TII->commuteInstruction(MI);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on the partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on the partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;

  CPSRDef = nullptr;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
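    // This must be recorded before ReduceMI below replaces MI, since
    // narrowing the first instruction of a bundle breaks the bundle chain.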
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (!NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since the post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  const TargetMachine &TM = MF.getTarget();
  TII = static_cast<const Thumb2InstrInfo *>(
      TM.getSubtargetImpl()->getInstrInfo());
  STI = &TM.getSubtarget<ARMSubtarget>();

  // Optimizing / minimizing size?
  AttributeSet FnAttrs = MF.getFunction()->getAttributes();
  OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::OptimizeForSize);
  MinimizeSize =
      FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so LastCPSRDef is known for all
  // predecessors.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass() {
  return new Thumb2SizeReduce();
}
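
// Example (illustrative): with r0 a low register and CPSR not live, the pass
// narrows
//   add.w r0, r0, #7   (32-bit t2ADDri)
// to
//   adds  r0, #7       (16-bit tADDi8 two-address form; the flag def is dead)
// SP-based t2LDRi12 / t2STRi12 are instead rewritten to the tLDRspi / tSTRspi
// encodings selected through NarrowOpc2.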