LLVM API Documentation
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(false),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef DenseMap<Instruction *, Type *> InstrToOrigTy;

class CodeGenPrepare : public FunctionPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetMachine *TM;
  const TargetLowering *TLI;
  const TargetLibraryInfo *TLInfo;
  DominatorTree *DT;

  /// CurInstIterator - As we scan instructions optimizing them, this is the
  /// next instruction to optimize. Xforms that can invalidate this should
  /// update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all truncates inserted for the current function.
  SetOfInstrs InsertedTruncsSet;
  /// Keeps track of the type of the related instruction before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// ModifiedDT - If the CFG is modified in any way, the dominator tree may
  /// need to be updated.
  bool ModifiedDT;

  /// OptSize - True if optimizing for size.
  bool OptSize;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  const char *getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfo>();
  }

private:
  bool EliminateFallThrough(Function &F);
  bool EliminateMostlyEmptyBlocks(Function &F);
  bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void EliminateMostlyEmptyBlock(BasicBlock *BB);
  bool OptimizeBlock(BasicBlock &BB);
  bool OptimizeInst(Instruction *I);
  bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
  bool OptimizeInlineAsmInst(CallInst *CS);
  bool OptimizeCallInst(CallInst *CI);
  bool MoveExtToFormExtLoad(Instruction *I);
  bool OptimizeExtUses(Instruction *I);
  bool OptimizeSelectInst(SelectInst *SI);
  bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool DupRetToEnableTailCallOpts(BasicBlock *BB);
  bool PlaceDbgValues(Function &F);
  bool sinkAndCmp(Function &F);
};
}

char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
                   "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}
bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedTruncsSet.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM)
    TLI = TM->getSubtargetImpl()->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then ISel may not be able
  // to handle it properly. ISel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  // If there is a mask, compare against zero, and branch that can be combined
  // into a single target instruction, push the mask and compare into branch
  // users. Do this before OptimizeBlock -> OptimizeInst ->
  // OptimizeCmpExpression, which perturbs the pattern being searched for.
  if (!DisableBranchOpts)
    EverMadeChange |= sinkAndCmp(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// SinkCast - Sink the specified cast instruction into its user blocks
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
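SinkCast leans on an idiom that recurs throughout this file: DenseMap::operator[] value-initializes the mapped pointer to null on first access and returns a reference to the slot, so one lookup doubles as the "already created one for this block?" test and as the place to record the new instruction. A standalone sketch of that pattern, with the hypothetical helper name getOrCreateCastIn:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static CastInst *getOrCreateCastIn(DenseMap<BasicBlock *, CastInst *> &Cache,
                                   BasicBlock *UserBB, CastInst *CI) {
  CastInst *&Slot = Cache[UserBB];   // null the first time UserBB is seen
  if (!Slot)                         // materialize at most one cast per block
    Slot = CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(),
                            "", UserBB->getFirstInsertionPt());
  return Slot;
}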

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI) {
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

/// isExtractBitsCandidateUse - Check if the candidates could
/// be combined with a shift instruction, which includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}
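The mask test above, imm & (imm + 1) == 0, accepts exactly the constants whose set bits form a contiguous run starting at bit 0. A small sketch with two constants chosen purely for illustration:

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void lowBitMaskExamples() {
  APInt LowMask(16, 0x00FF);   // 0000000011111111: contiguous low bits
  APInt NotAMask(16, 0x00F0);  // 0000000011110000: hole below the set bits
  bool RejectLow  = (LowMask & (LowMask + 1)).getBoolValue();   // false: kept
  bool RejectHole = (NotAMask & (NotAMask + 1)).getBoolValue(); // true: rejected
  (void)RejectLow; (void)RejectHole;
}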

/// SinkShiftAndTruncate - sink both shift and truncate instructions
/// to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.

    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(ISDOpcode,
                                     EVT::getEVT(TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// OptimizeExtractBits - sink the shift *right* instruction into user blocks if
/// the uses could potentially be combined with this shift instruction and
/// generate BitExtract instruction. It will only be applied if the architecture
/// supports BitExtract instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instruction are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if not
      // legal. In this case, we would like to sink both shift and truncate
      // instruction to the BB of TruncUse.
      // For example:
      // BB1:
      //   i64 shift.result = lshr i64 opnd, imm
      //   trunc.result = trunc shift.result to i16
      //
      // BB2:
      //   ----> We will have an implicit truncate here if the architecture does
      //   not have i16 compare.
      //   cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) override {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const override {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal,
                                  TLI ? TLI->getDataLayout() : nullptr,
                                  TLInfo, ModifiedDT ? nullptr : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction()) return false;

  // We'll need DataLayout from here on out.
  const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD, TLInfo);
}
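The llvm.objectsize lowering above always resolves the intrinsic to a "don't know" answer: 0 when the second argument requests the minimum size, and -1 (all bits set) when it requests the maximum. A tiny sketch of just that constant selection; the helper name foldUnknownObjectSize is made up for illustration:

#include "llvm/IR/Constants.h"

static llvm::Constant *foldUnknownObjectSize(llvm::Type *ReturnTy, bool Min) {
  // Mirrors the RetVal computed in OptimizeCallInst: with no better
  // information, the minimum possible size is 0 and the maximum is all ones.
  return llvm::ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
}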

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
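print() joins whichever components are present with " + " inside square brackets, so a mode with a constant offset, a base register, and a scaled index reads roughly like "[16 + Base:%ptr + 4*%idx]". A debug-build sketch of filling in and dumping one; the helper name showAddrMode and the concrete values are illustrative only:

static void showAddrMode(Value *Ptr, Value *Idx) {
  ExtAddrMode AM;
  AM.BaseOffs = 16;     // constant displacement
  AM.HasBaseReg = true;
  AM.BaseReg = Ptr;     // base register value
  AM.Scale = 4;         // scaled index: 4 * Idx
  AM.ScaledReg = Idx;
  AM.dump();            // e.g. prints: [16 + Base:%ptr + 4*%idx]
}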

/// \brief This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
class TypePromotionTransaction {

  /// \brief This represents the common interface of the individual transaction.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() {}

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact same
    /// state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Advocate every change made by this action.
    /// When the results on the IR of the action are to be kept, it is important
    /// to call this function, otherwise hidden information may be kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;
    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst;
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = --It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified instruction.
    unsigned Idx;

  public:
    /// \brief Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction using the instruction.
      Instruction *Inst;
      /// The index where this instruction is used for Inst.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the uses of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer;

  public:
    /// \brief Remove all references of \p Inst and optionally replace all its
    /// uses with New.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(nullptr) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      Inst->removeFromParent();
    }

    ~InstructionRemover() { delete Replacer; }

    /// \brief Really remove the instruction.
    void commit() override { delete Inst; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when building this action.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;
  /// Advocate every change made in this transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
  typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt;
};

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
                                             Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
}
Actions.back().get() : nullptr; 01523 } 01524 01525 void TypePromotionTransaction::commit() { 01526 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 01527 ++It) 01528 (*It)->commit(); 01529 Actions.clear(); 01530 } 01531 01532 void TypePromotionTransaction::rollback( 01533 TypePromotionTransaction::ConstRestorationPt Point) { 01534 while (!Actions.empty() && Point != Actions.back().get()) { 01535 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 01536 Curr->undo(); 01537 } 01538 } 01539 01540 /// \brief A helper class for matching addressing modes. 01541 /// 01542 /// This encapsulates the logic for matching the target-legal addressing modes. 01543 class AddressingModeMatcher { 01544 SmallVectorImpl<Instruction*> &AddrModeInsts; 01545 const TargetLowering &TLI; 01546 01547 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 01548 /// the memory instruction that we're computing this address for. 01549 Type *AccessTy; 01550 Instruction *MemoryInst; 01551 01552 /// AddrMode - This is the addressing mode that we're building up. This is 01553 /// part of the return value of this addressing mode matching stuff. 01554 ExtAddrMode &AddrMode; 01555 01556 /// The truncate instruction inserted by other CodeGenPrepare optimizations. 01557 const SetOfInstrs &InsertedTruncs; 01558 /// A map from the instructions to their type before promotion. 01559 InstrToOrigTy &PromotedInsts; 01560 /// The ongoing transaction where every action should be registered. 01561 TypePromotionTransaction &TPT; 01562 01563 /// IgnoreProfitability - This is set to true when we should not do 01564 /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode 01565 /// always returns true. 01566 bool IgnoreProfitability; 01567 01568 AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI, 01569 const TargetLowering &T, Type *AT, 01570 Instruction *MI, ExtAddrMode &AM, 01571 const SetOfInstrs &InsertedTruncs, 01572 InstrToOrigTy &PromotedInsts, 01573 TypePromotionTransaction &TPT) 01574 : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM), 01575 InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) { 01576 IgnoreProfitability = false; 01577 } 01578 public: 01579 01580 /// Match - Find the maximal addressing mode that a load/store of V can fold, 01581 /// give an access type of AccessTy. This returns a list of involved 01582 /// instructions in AddrModeInsts. 01583 /// \p InsertedTruncs The truncate instruction inserted by other 01584 /// CodeGenPrepare 01585 /// optimizations. 01586 /// \p PromotedInsts maps the instructions to their type before promotion. 01587 /// \p The ongoing transaction where every action should be registered. 
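  /// A call sketch (illustrative; this is essentially how OptimizeMemoryInst
  /// below invokes the matcher, with every argument supplied by that caller):
  ///   SmallVector<Instruction*, 16> NewAddrModeInsts;
  ///   ExtAddrMode AM = AddressingModeMatcher::Match(
  ///       V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet,
  ///       PromotedInsts, TPT);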
01588 static ExtAddrMode Match(Value *V, Type *AccessTy, 01589 Instruction *MemoryInst, 01590 SmallVectorImpl<Instruction*> &AddrModeInsts, 01591 const TargetLowering &TLI, 01592 const SetOfInstrs &InsertedTruncs, 01593 InstrToOrigTy &PromotedInsts, 01594 TypePromotionTransaction &TPT) { 01595 ExtAddrMode Result; 01596 01597 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy, 01598 MemoryInst, Result, InsertedTruncs, 01599 PromotedInsts, TPT).MatchAddr(V, 0); 01600 (void)Success; assert(Success && "Couldn't select *anything*?"); 01601 return Result; 01602 } 01603 private: 01604 bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 01605 bool MatchAddr(Value *V, unsigned Depth); 01606 bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, 01607 bool *MovedAway = nullptr); 01608 bool IsProfitableToFoldIntoAddressingMode(Instruction *I, 01609 ExtAddrMode &AMBefore, 01610 ExtAddrMode &AMAfter); 01611 bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 01612 bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion, 01613 Value *PromotedOperand) const; 01614 }; 01615 01616 /// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode. 01617 /// Return true and update AddrMode if this addr mode is legal for the target, 01618 /// false if not. 01619 bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale, 01620 unsigned Depth) { 01621 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 01622 // mode. Just process that directly. 01623 if (Scale == 1) 01624 return MatchAddr(ScaleReg, Depth); 01625 01626 // If the scale is 0, it takes nothing to add this. 01627 if (Scale == 0) 01628 return true; 01629 01630 // If we already have a scale of this value, we can add to it, otherwise, we 01631 // need an available scale field. 01632 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 01633 return false; 01634 01635 ExtAddrMode TestAddrMode = AddrMode; 01636 01637 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 01638 // [A+B + A*7] -> [B+A*8]. 01639 TestAddrMode.Scale += Scale; 01640 TestAddrMode.ScaledReg = ScaleReg; 01641 01642 // If the new address isn't legal, bail out. 01643 if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) 01644 return false; 01645 01646 // It was legal, so commit it. 01647 AddrMode = TestAddrMode; 01648 01649 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 01650 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 01651 // X*Scale + C*Scale to addr mode. 01652 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 01653 if (isa<Instruction>(ScaleReg) && // not a constant expr. 01654 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { 01655 TestAddrMode.ScaledReg = AddLHS; 01656 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; 01657 01658 // If this addressing mode is legal, commit it and remember that we folded 01659 // this instruction. 01660 if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) { 01661 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 01662 AddrMode = TestAddrMode; 01663 return true; 01664 } 01665 } 01666 01667 // Otherwise, not (x+c)*scale, just return what we have. 01668 return true; 01669 } 01670 01671 /// MightBeFoldableInst - This is a little filter, which returns true if an 01672 /// addressing computation involving I might be folded into a load/store 01673 /// accessing it. 
This doesn't need to be perfect, but needs to accept at least 01674 /// the set of instructions that MatchOperationAddr can. 01675 static bool MightBeFoldableInst(Instruction *I) { 01676 switch (I->getOpcode()) { 01677 case Instruction::BitCast: 01678 case Instruction::AddrSpaceCast: 01679 // Don't touch identity bitcasts. 01680 if (I->getType() == I->getOperand(0)->getType()) 01681 return false; 01682 return I->getType()->isPointerTy() || I->getType()->isIntegerTy(); 01683 case Instruction::PtrToInt: 01684 // PtrToInt is always a noop, as we know that the int type is pointer sized. 01685 return true; 01686 case Instruction::IntToPtr: 01687 // We know the input is intptr_t, so this is foldable. 01688 return true; 01689 case Instruction::Add: 01690 return true; 01691 case Instruction::Mul: 01692 case Instruction::Shl: 01693 // Can only handle X*C and X << C. 01694 return isa<ConstantInt>(I->getOperand(1)); 01695 case Instruction::GetElementPtr: 01696 return true; 01697 default: 01698 return false; 01699 } 01700 } 01701 01702 /// \brief Hepler class to perform type promotion. 01703 class TypePromotionHelper { 01704 /// \brief Utility function to check whether or not a sign extension of 01705 /// \p Inst with \p ConsideredSExtType can be moved through \p Inst by either 01706 /// using the operands of \p Inst or promoting \p Inst. 01707 /// In other words, check if: 01708 /// sext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredSExtType. 01709 /// #1 Promotion applies: 01710 /// ConsideredSExtType Inst (sext opnd1 to ConsideredSExtType, ...). 01711 /// #2 Operand reuses: 01712 /// sext opnd1 to ConsideredSExtType. 01713 /// \p PromotedInsts maps the instructions to their type before promotion. 01714 static bool canGetThrough(const Instruction *Inst, Type *ConsideredSExtType, 01715 const InstrToOrigTy &PromotedInsts); 01716 01717 /// \brief Utility function to determine if \p OpIdx should be promoted when 01718 /// promoting \p Inst. 01719 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) { 01720 if (isa<SelectInst>(Inst) && OpIdx == 0) 01721 return false; 01722 return true; 01723 } 01724 01725 /// \brief Utility function to promote the operand of \p SExt when this 01726 /// operand is a promotable trunc or sext or zext. 01727 /// \p PromotedInsts maps the instructions to their type before promotion. 01728 /// \p CreatedInsts[out] contains how many non-free instructions have been 01729 /// created to promote the operand of SExt. 01730 /// Should never be called directly. 01731 /// \return The promoted value which is used instead of SExt. 01732 static Value *promoteOperandForTruncAndAnyExt(Instruction *SExt, 01733 TypePromotionTransaction &TPT, 01734 InstrToOrigTy &PromotedInsts, 01735 unsigned &CreatedInsts); 01736 01737 /// \brief Utility function to promote the operand of \p SExt when this 01738 /// operand is promotable and is not a supported trunc or sext. 01739 /// \p PromotedInsts maps the instructions to their type before promotion. 01740 /// \p CreatedInsts[out] contains how many non-free instructions have been 01741 /// created to promote the operand of SExt. 01742 /// Should never be called directly. 01743 /// \return The promoted value which is used instead of SExt. 01744 static Value *promoteOperandForOther(Instruction *SExt, 01745 TypePromotionTransaction &TPT, 01746 InstrToOrigTy &PromotedInsts, 01747 unsigned &CreatedInsts); 01748 01749 public: 01750 /// Type for the utility function that promotes the operand of SExt. 
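  /// How the returned Action is typically used (illustrative; it matches the
  /// call site in AddressingModeMatcher::MatchOperationAddr further down):
  ///   TypePromotionHelper::Action TPH =
  ///       TypePromotionHelper::getAction(SExt, InsertedTruncs, TLI,
  ///                                      PromotedInsts);
  ///   if (TPH) {
  ///     unsigned CreatedInsts = 0;
  ///     Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts);
  ///   }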
01751 typedef Value *(*Action)(Instruction *SExt, TypePromotionTransaction &TPT, 01752 InstrToOrigTy &PromotedInsts, 01753 unsigned &CreatedInsts); 01754 /// \brief Given a sign extend instruction \p SExt, return the approriate 01755 /// action to promote the operand of \p SExt instead of using SExt. 01756 /// \return NULL if no promotable action is possible with the current 01757 /// sign extension. 01758 /// \p InsertedTruncs keeps track of all the truncate instructions inserted by 01759 /// the others CodeGenPrepare optimizations. This information is important 01760 /// because we do not want to promote these instructions as CodeGenPrepare 01761 /// will reinsert them later. Thus creating an infinite loop: create/remove. 01762 /// \p PromotedInsts maps the instructions to their type before promotion. 01763 static Action getAction(Instruction *SExt, const SetOfInstrs &InsertedTruncs, 01764 const TargetLowering &TLI, 01765 const InstrToOrigTy &PromotedInsts); 01766 }; 01767 01768 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 01769 Type *ConsideredSExtType, 01770 const InstrToOrigTy &PromotedInsts) { 01771 // We can always get through sext or zext. 01772 if (isa<SExtInst>(Inst) || isa<ZExtInst>(Inst)) 01773 return true; 01774 01775 // We can get through binary operator, if it is legal. In other words, the 01776 // binary operator must have a nuw or nsw flag. 01777 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 01778 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && 01779 (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap())) 01780 return true; 01781 01782 // Check if we can do the following simplification. 01783 // sext(trunc(sext)) --> sext 01784 if (!isa<TruncInst>(Inst)) 01785 return false; 01786 01787 Value *OpndVal = Inst->getOperand(0); 01788 // Check if we can use this operand in the sext. 01789 // If the type is larger than the result type of the sign extension, 01790 // we cannot. 01791 if (OpndVal->getType()->getIntegerBitWidth() > 01792 ConsideredSExtType->getIntegerBitWidth()) 01793 return false; 01794 01795 // If the operand of the truncate is not an instruction, we will not have 01796 // any information on the dropped bits. 01797 // (Actually we could for constant but it is not worth the extra logic). 01798 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 01799 if (!Opnd) 01800 return false; 01801 01802 // Check if the source of the type is narrow enough. 01803 // I.e., check that trunc just drops sign extended bits. 01804 // #1 get the type of the operand. 01805 const Type *OpndType; 01806 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 01807 if (It != PromotedInsts.end()) 01808 OpndType = It->second; 01809 else if (isa<SExtInst>(Opnd)) 01810 OpndType = cast<Instruction>(Opnd)->getOperand(0)->getType(); 01811 else 01812 return false; 01813 01814 // #2 check that the truncate just drop sign extended bits. 01815 if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth()) 01816 return true; 01817 01818 return false; 01819 } 01820 01821 TypePromotionHelper::Action TypePromotionHelper::getAction( 01822 Instruction *SExt, const SetOfInstrs &InsertedTruncs, 01823 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 01824 Instruction *SExtOpnd = dyn_cast<Instruction>(SExt->getOperand(0)); 01825 Type *SExtTy = SExt->getType(); 01826 // If the operand of the sign extension is not an instruction, we cannot 01827 // get through. 01828 // If it, check we can get through. 
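  // To illustrate the dispatch below on hypothetical IR:
  //   %z = zext i8 %a to i16
  //   %s = sext i16 %z to i64      ; operand is a trunc/sext/zext
  //                                ;   -> promoteOperandForTruncAndAnyExt
  //   %add = add nsw i16 %a, %b
  //   %s2  = sext i16 %add to i64  ; nsw/nuw binary operator
  //                                ;   -> promoteOperandForOther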
01829 if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts)) 01830 return nullptr; 01831 01832 // Do not promote if the operand has been added by codegenprepare. 01833 // Otherwise, it means we are undoing an optimization that is likely to be 01834 // redone, thus causing potential infinite loop. 01835 if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd)) 01836 return nullptr; 01837 01838 // SExt or Trunc instructions. 01839 // Return the related handler. 01840 if (isa<SExtInst>(SExtOpnd) || isa<TruncInst>(SExtOpnd) || 01841 isa<ZExtInst>(SExtOpnd)) 01842 return promoteOperandForTruncAndAnyExt; 01843 01844 // Regular instruction. 01845 // Abort early if we will have to insert non-free instructions. 01846 if (!SExtOpnd->hasOneUse() && 01847 !TLI.isTruncateFree(SExtTy, SExtOpnd->getType())) 01848 return nullptr; 01849 return promoteOperandForOther; 01850 } 01851 01852 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 01853 llvm::Instruction *SExt, TypePromotionTransaction &TPT, 01854 InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts) { 01855 // By construction, the operand of SExt is an instruction. Otherwise we cannot 01856 // get through it and this method should not be called. 01857 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 01858 Value *ExtVal = SExt; 01859 if (isa<ZExtInst>(SExtOpnd)) { 01860 // Replace sext(zext(opnd)) 01861 // => zext(opnd). 01862 Value *ZExt = 01863 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 01864 TPT.replaceAllUsesWith(SExt, ZExt); 01865 TPT.eraseInstruction(SExt); 01866 ExtVal = ZExt; 01867 } else { 01868 // Replace sext(trunc(opnd)) or sext(sext(opnd)) 01869 // => sext(opnd). 01870 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 01871 } 01872 CreatedInsts = 0; 01873 01874 // Remove dead code. 01875 if (SExtOpnd->use_empty()) 01876 TPT.eraseInstruction(SExtOpnd); 01877 01878 // Check if the extension is still needed. 01879 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 01880 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) 01881 return ExtVal; 01882 01883 // At this point we have: ext ty opnd to ty. 01884 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 01885 Value *NextVal = ExtInst->getOperand(0); 01886 TPT.eraseInstruction(ExtInst, NextVal); 01887 return NextVal; 01888 } 01889 01890 Value * 01891 TypePromotionHelper::promoteOperandForOther(Instruction *SExt, 01892 TypePromotionTransaction &TPT, 01893 InstrToOrigTy &PromotedInsts, 01894 unsigned &CreatedInsts) { 01895 // By construction, the operand of SExt is an instruction. Otherwise we cannot 01896 // get through it and this method should not be called. 01897 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 01898 CreatedInsts = 0; 01899 if (!SExtOpnd->hasOneUse()) { 01900 // SExtOpnd will be promoted. 01901 // All its uses, but SExt, will need to use a truncated value of the 01902 // promoted version. 01903 // Create the truncate now. 01904 Value *Trunc = TPT.createTrunc(SExt, SExtOpnd->getType()); 01905 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 01906 ITrunc->removeFromParent(); 01907 // Insert it just after the definition. 01908 ITrunc->insertAfter(SExtOpnd); 01909 } 01910 01911 TPT.replaceAllUsesWith(SExtOpnd, Trunc); 01912 // Restore the operand of SExt (which has been replace by the previous call 01913 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 
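    // (The trunc built above is defined in terms of SExt itself, so the
    // replaceAllUsesWith call also rewrote SExt's own operand to that trunc;
    // restoring SExtOpnd here is what breaks the trunc <-> sext cycle.)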
01914 TPT.setOperand(SExt, 0, SExtOpnd); 01915 } 01916 01917 // Get through the Instruction: 01918 // 1. Update its type. 01919 // 2. Replace the uses of SExt by Inst. 01920 // 3. Sign extend each operand that needs to be sign extended. 01921 01922 // Remember the original type of the instruction before promotion. 01923 // This is useful to know that the high bits are sign extended bits. 01924 PromotedInsts.insert( 01925 std::pair<Instruction *, Type *>(SExtOpnd, SExtOpnd->getType())); 01926 // Step #1. 01927 TPT.mutateType(SExtOpnd, SExt->getType()); 01928 // Step #2. 01929 TPT.replaceAllUsesWith(SExt, SExtOpnd); 01930 // Step #3. 01931 Instruction *SExtForOpnd = SExt; 01932 01933 DEBUG(dbgs() << "Propagate SExt to operands\n"); 01934 for (int OpIdx = 0, EndOpIdx = SExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 01935 ++OpIdx) { 01936 DEBUG(dbgs() << "Operand:\n" << *(SExtOpnd->getOperand(OpIdx)) << '\n'); 01937 if (SExtOpnd->getOperand(OpIdx)->getType() == SExt->getType() || 01938 !shouldSExtOperand(SExtOpnd, OpIdx)) { 01939 DEBUG(dbgs() << "No need to propagate\n"); 01940 continue; 01941 } 01942 // Check if we can statically sign extend the operand. 01943 Value *Opnd = SExtOpnd->getOperand(OpIdx); 01944 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 01945 DEBUG(dbgs() << "Statically sign extend\n"); 01946 TPT.setOperand( 01947 SExtOpnd, OpIdx, 01948 ConstantInt::getSigned(SExt->getType(), Cst->getSExtValue())); 01949 continue; 01950 } 01951 // UndefValue are typed, so we have to statically sign extend them. 01952 if (isa<UndefValue>(Opnd)) { 01953 DEBUG(dbgs() << "Statically sign extend\n"); 01954 TPT.setOperand(SExtOpnd, OpIdx, UndefValue::get(SExt->getType())); 01955 continue; 01956 } 01957 01958 // Otherwise we have to explicity sign extend the operand. 01959 // Check if SExt was reused to sign extend an operand. 01960 if (!SExtForOpnd) { 01961 // If yes, create a new one. 01962 DEBUG(dbgs() << "More operands to sext\n"); 01963 SExtForOpnd = 01964 cast<Instruction>(TPT.createSExt(SExt, Opnd, SExt->getType())); 01965 ++CreatedInsts; 01966 } 01967 01968 TPT.setOperand(SExtForOpnd, 0, Opnd); 01969 01970 // Move the sign extension before the insertion point. 01971 TPT.moveBefore(SExtForOpnd, SExtOpnd); 01972 TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd); 01973 // If more sext are required, new instructions will have to be created. 01974 SExtForOpnd = nullptr; 01975 } 01976 if (SExtForOpnd == SExt) { 01977 DEBUG(dbgs() << "Sign extension is useless now\n"); 01978 TPT.eraseInstruction(SExt); 01979 } 01980 return SExtOpnd; 01981 } 01982 01983 /// IsPromotionProfitable - Check whether or not promoting an instruction 01984 /// to a wider type was profitable. 01985 /// \p MatchedSize gives the number of instructions that have been matched 01986 /// in the addressing mode after the promotion was applied. 01987 /// \p SizeWithPromotion gives the number of created instructions for 01988 /// the promotion plus the number of instructions that have been 01989 /// matched in the addressing mode before the promotion. 01990 /// \p PromotedOperand is the value that has been promoted. 01991 /// \return True if the promotion is profitable, false otherwise. 01992 bool 01993 AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize, 01994 unsigned SizeWithPromotion, 01995 Value *PromotedOperand) const { 01996 // We folded less instructions than what we created to promote the operand. 01997 // This is not profitable. 
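  // For instance (illustrative numbers): if 3 instructions were matched before
  // the promotion and promoting created 2 new sexts, SizeWithPromotion is 5,
  // so unless the promoted form lets us match at least 5 instructions we bail.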
01998 if (MatchedSize < SizeWithPromotion) 01999 return false; 02000 if (MatchedSize > SizeWithPromotion) 02001 return true; 02002 // The promotion is neutral but it may help folding the sign extension in 02003 // loads for instance. 02004 // Check that we did not create an illegal instruction. 02005 Instruction *PromotedInst = dyn_cast<Instruction>(PromotedOperand); 02006 if (!PromotedInst) 02007 return false; 02008 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 02009 // If the ISDOpcode is undefined, it was undefined before the promotion. 02010 if (!ISDOpcode) 02011 return true; 02012 // Otherwise, check if the promoted instruction is legal or not. 02013 return TLI.isOperationLegalOrCustom(ISDOpcode, 02014 EVT::getEVT(PromotedInst->getType())); 02015 } 02016 02017 /// MatchOperationAddr - Given an instruction or constant expr, see if we can 02018 /// fold the operation into the addressing mode. If so, update the addressing 02019 /// mode and return true, otherwise return false without modifying AddrMode. 02020 /// If \p MovedAway is not NULL, it contains the information of whether or 02021 /// not AddrInst has to be folded into the addressing mode on success. 02022 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 02023 /// because it has been moved away. 02024 /// Thus AddrInst must not be added in the matched instructions. 02025 /// This state can happen when AddrInst is a sext, since it may be moved away. 02026 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 02027 /// not be referenced anymore. 02028 bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode, 02029 unsigned Depth, 02030 bool *MovedAway) { 02031 // Avoid exponential behavior on extremely deep expression trees. 02032 if (Depth >= 5) return false; 02033 02034 // By default, all matched instructions stay in place. 02035 if (MovedAway) 02036 *MovedAway = false; 02037 02038 switch (Opcode) { 02039 case Instruction::PtrToInt: 02040 // PtrToInt is always a noop, as we know that the int type is pointer sized. 02041 return MatchAddr(AddrInst->getOperand(0), Depth); 02042 case Instruction::IntToPtr: 02043 // This inttoptr is a no-op if the integer type is pointer sized. 02044 if (TLI.getValueType(AddrInst->getOperand(0)->getType()) == 02045 TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace())) 02046 return MatchAddr(AddrInst->getOperand(0), Depth); 02047 return false; 02048 case Instruction::BitCast: 02049 case Instruction::AddrSpaceCast: 02050 // BitCast is always a noop, and we can handle it as long as it is 02051 // int->int or pointer->pointer (we don't want int<->fp or something). 02052 if ((AddrInst->getOperand(0)->getType()->isPointerTy() || 02053 AddrInst->getOperand(0)->getType()->isIntegerTy()) && 02054 // Don't touch identity bitcasts. These were probably put here by LSR, 02055 // and we don't want to mess around with them. Assume it knows what it 02056 // is doing. 02057 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 02058 return MatchAddr(AddrInst->getOperand(0), Depth); 02059 return false; 02060 case Instruction::Add: { 02061 // Check to see if we can merge in the RHS then the LHS. If so, we win. 02062 ExtAddrMode BackupAddrMode = AddrMode; 02063 unsigned OldSize = AddrModeInsts.size(); 02064 // Start a transaction at this point. 02065 // The LHS may match but not the RHS. 02066 // Therefore, we need a higher level restoration point to undo partially 02067 // matched operation. 
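    // E.g. (illustrative) for "add %base, %idx" the RHS pass may fold %idx as
    // the scaled register and the LHS pass %base as the base register; if the
    // second half fails, we roll back to this point and retry the other order
    // below.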
02068 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 02069 TPT.getRestorationPoint(); 02070 02071 if (MatchAddr(AddrInst->getOperand(1), Depth+1) && 02072 MatchAddr(AddrInst->getOperand(0), Depth+1)) 02073 return true; 02074 02075 // Restore the old addr mode info. 02076 AddrMode = BackupAddrMode; 02077 AddrModeInsts.resize(OldSize); 02078 TPT.rollback(LastKnownGood); 02079 02080 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 02081 if (MatchAddr(AddrInst->getOperand(0), Depth+1) && 02082 MatchAddr(AddrInst->getOperand(1), Depth+1)) 02083 return true; 02084 02085 // Otherwise we definitely can't merge the ADD in. 02086 AddrMode = BackupAddrMode; 02087 AddrModeInsts.resize(OldSize); 02088 TPT.rollback(LastKnownGood); 02089 break; 02090 } 02091 //case Instruction::Or: 02092 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 02093 //break; 02094 case Instruction::Mul: 02095 case Instruction::Shl: { 02096 // Can only handle X*C and X << C. 02097 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 02098 if (!RHS) 02099 return false; 02100 int64_t Scale = RHS->getSExtValue(); 02101 if (Opcode == Instruction::Shl) 02102 Scale = 1LL << Scale; 02103 02104 return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth); 02105 } 02106 case Instruction::GetElementPtr: { 02107 // Scan the GEP. We check it if it contains constant offsets and at most 02108 // one variable offset. 02109 int VariableOperand = -1; 02110 unsigned VariableScale = 0; 02111 02112 int64_t ConstantOffset = 0; 02113 const DataLayout *TD = TLI.getDataLayout(); 02114 gep_type_iterator GTI = gep_type_begin(AddrInst); 02115 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 02116 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 02117 const StructLayout *SL = TD->getStructLayout(STy); 02118 unsigned Idx = 02119 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 02120 ConstantOffset += SL->getElementOffset(Idx); 02121 } else { 02122 uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType()); 02123 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 02124 ConstantOffset += CI->getSExtValue()*TypeSize; 02125 } else if (TypeSize) { // Scales of zero don't do anything. 02126 // We only allow one variable index at the moment. 02127 if (VariableOperand != -1) 02128 return false; 02129 02130 // Remember the variable index. 02131 VariableOperand = i; 02132 VariableScale = TypeSize; 02133 } 02134 } 02135 } 02136 02137 // A common case is for the GEP to only do a constant offset. In this case, 02138 // just add it to the disp field and check validity. 02139 if (VariableOperand == -1) { 02140 AddrMode.BaseOffs += ConstantOffset; 02141 if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){ 02142 // Check to see if we can fold the base pointer in too. 02143 if (MatchAddr(AddrInst->getOperand(0), Depth+1)) 02144 return true; 02145 } 02146 AddrMode.BaseOffs -= ConstantOffset; 02147 return false; 02148 } 02149 02150 // Save the valid addressing mode in case we can't match. 02151 ExtAddrMode BackupAddrMode = AddrMode; 02152 unsigned OldSize = AddrModeInsts.size(); 02153 02154 // See if the scale and offset amount is valid for this target. 02155 AddrMode.BaseOffs += ConstantOffset; 02156 02157 // Match the base operand of the GEP. 02158 if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) { 02159 // If it couldn't be matched, just stuff the value in a register. 
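      // (Unless a base register is already occupied, in which case there is
      // nowhere left to stuff the GEP's pointer operand and we must give up.)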
02160 if (AddrMode.HasBaseReg) { 02161 AddrMode = BackupAddrMode; 02162 AddrModeInsts.resize(OldSize); 02163 return false; 02164 } 02165 AddrMode.HasBaseReg = true; 02166 AddrMode.BaseReg = AddrInst->getOperand(0); 02167 } 02168 02169 // Match the remaining variable portion of the GEP. 02170 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 02171 Depth)) { 02172 // If it couldn't be matched, try stuffing the base into a register 02173 // instead of matching it, and retrying the match of the scale. 02174 AddrMode = BackupAddrMode; 02175 AddrModeInsts.resize(OldSize); 02176 if (AddrMode.HasBaseReg) 02177 return false; 02178 AddrMode.HasBaseReg = true; 02179 AddrMode.BaseReg = AddrInst->getOperand(0); 02180 AddrMode.BaseOffs += ConstantOffset; 02181 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), 02182 VariableScale, Depth)) { 02183 // If even that didn't work, bail. 02184 AddrMode = BackupAddrMode; 02185 AddrModeInsts.resize(OldSize); 02186 return false; 02187 } 02188 } 02189 02190 return true; 02191 } 02192 case Instruction::SExt: { 02193 Instruction *SExt = dyn_cast<Instruction>(AddrInst); 02194 if (!SExt) 02195 return false; 02196 02197 // Try to move this sext out of the way of the addressing mode. 02198 // Ask for a method for doing so. 02199 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction( 02200 SExt, InsertedTruncs, TLI, PromotedInsts); 02201 if (!TPH) 02202 return false; 02203 02204 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 02205 TPT.getRestorationPoint(); 02206 unsigned CreatedInsts = 0; 02207 Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts); 02208 // SExt has been moved away. 02209 // Thus either it will be rematched later in the recursive calls or it is 02210 // gone. Anyway, we must not fold it into the addressing mode at this point. 02211 // E.g., 02212 // op = add opnd, 1 02213 // idx = sext op 02214 // addr = gep base, idx 02215 // is now: 02216 // promotedOpnd = sext opnd <- no match here 02217 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 02218 // addr = gep base, op <- match 02219 if (MovedAway) 02220 *MovedAway = true; 02221 02222 assert(PromotedOperand && 02223 "TypePromotionHelper should have filtered out those cases"); 02224 02225 ExtAddrMode BackupAddrMode = AddrMode; 02226 unsigned OldSize = AddrModeInsts.size(); 02227 02228 if (!MatchAddr(PromotedOperand, Depth) || 02229 !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts, 02230 PromotedOperand)) { 02231 AddrMode = BackupAddrMode; 02232 AddrModeInsts.resize(OldSize); 02233 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 02234 TPT.rollback(LastKnownGood); 02235 return false; 02236 } 02237 return true; 02238 } 02239 } 02240 return false; 02241 } 02242 02243 /// MatchAddr - If we can, try to add the value of 'Addr' into the current 02244 /// addressing mode. If Addr can't be added to AddrMode this returns false and 02245 /// leaves AddrMode unmodified. This assumes that Addr is either a pointer type 02246 /// or intptr_t for the target. 02247 /// 02248 bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) { 02249 // Start a transaction at this point that we will rollback if the matching 02250 // fails. 02251 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 02252 TPT.getRestorationPoint(); 02253 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 02254 // Fold in immediates if legal for the target. 
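    // (Typically reached from the Add/GEP cases above with a constant operand;
    // the offset is added speculatively and taken back just below if the
    // target rejects the resulting addressing mode.)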
02255 AddrMode.BaseOffs += CI->getSExtValue(); 02256 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 02257 return true; 02258 AddrMode.BaseOffs -= CI->getSExtValue(); 02259 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 02260 // If this is a global variable, try to fold it into the addressing mode. 02261 if (!AddrMode.BaseGV) { 02262 AddrMode.BaseGV = GV; 02263 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 02264 return true; 02265 AddrMode.BaseGV = nullptr; 02266 } 02267 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 02268 ExtAddrMode BackupAddrMode = AddrMode; 02269 unsigned OldSize = AddrModeInsts.size(); 02270 02271 // Check to see if it is possible to fold this operation. 02272 bool MovedAway = false; 02273 if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 02274 // This instruction may have been move away. If so, there is nothing 02275 // to check here. 02276 if (MovedAway) 02277 return true; 02278 // Okay, it's possible to fold this. Check to see if it is actually 02279 // *profitable* to do so. We use a simple cost model to avoid increasing 02280 // register pressure too much. 02281 if (I->hasOneUse() || 02282 IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 02283 AddrModeInsts.push_back(I); 02284 return true; 02285 } 02286 02287 // It isn't profitable to do this, roll back. 02288 //cerr << "NOT FOLDING: " << *I; 02289 AddrMode = BackupAddrMode; 02290 AddrModeInsts.resize(OldSize); 02291 TPT.rollback(LastKnownGood); 02292 } 02293 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 02294 if (MatchOperationAddr(CE, CE->getOpcode(), Depth)) 02295 return true; 02296 TPT.rollback(LastKnownGood); 02297 } else if (isa<ConstantPointerNull>(Addr)) { 02298 // Null pointer gets folded without affecting the addressing mode. 02299 return true; 02300 } 02301 02302 // Worse case, the target should support [reg] addressing modes. :) 02303 if (!AddrMode.HasBaseReg) { 02304 AddrMode.HasBaseReg = true; 02305 AddrMode.BaseReg = Addr; 02306 // Still check for legality in case the target supports [imm] but not [i+r]. 02307 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 02308 return true; 02309 AddrMode.HasBaseReg = false; 02310 AddrMode.BaseReg = nullptr; 02311 } 02312 02313 // If the base register is already taken, see if we can do [r+r]. 02314 if (AddrMode.Scale == 0) { 02315 AddrMode.Scale = 1; 02316 AddrMode.ScaledReg = Addr; 02317 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 02318 return true; 02319 AddrMode.Scale = 0; 02320 AddrMode.ScaledReg = nullptr; 02321 } 02322 // Couldn't match. 02323 TPT.rollback(LastKnownGood); 02324 return false; 02325 } 02326 02327 /// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified 02328 /// inline asm call are due to memory operands. If so, return true, otherwise 02329 /// return false. 02330 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 02331 const TargetLowering &TLI) { 02332 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI)); 02333 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 02334 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 02335 02336 // Compute the constraint code and ConstraintType to use. 02337 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 02338 02339 // If this asm operand is our Value*, and if it isn't an indirect memory 02340 // operand, we can't fold it! 
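    // (For example, an operand tied to an "m" constraint is an indirect memory
    // operand and is fine here, whereas an "r" operand that consumes OpVal
    // directly defeats the folding.)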
02341 if (OpInfo.CallOperandVal == OpVal && 02342 (OpInfo.ConstraintType != TargetLowering::C_Memory || 02343 !OpInfo.isIndirect)) 02344 return false; 02345 } 02346 02347 return true; 02348 } 02349 02350 /// FindAllMemoryUses - Recursively walk all the uses of I until we find a 02351 /// memory use. If we find an obviously non-foldable instruction, return true. 02352 /// Add the ultimately found memory instructions to MemoryUses. 02353 static bool FindAllMemoryUses(Instruction *I, 02354 SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses, 02355 SmallPtrSetImpl<Instruction*> &ConsideredInsts, 02356 const TargetLowering &TLI) { 02357 // If we already considered this instruction, we're done. 02358 if (!ConsideredInsts.insert(I)) 02359 return false; 02360 02361 // If this is an obviously unfoldable instruction, bail out. 02362 if (!MightBeFoldableInst(I)) 02363 return true; 02364 02365 // Loop over all the uses, recursively processing them. 02366 for (Use &U : I->uses()) { 02367 Instruction *UserI = cast<Instruction>(U.getUser()); 02368 02369 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 02370 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 02371 continue; 02372 } 02373 02374 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 02375 unsigned opNo = U.getOperandNo(); 02376 if (opNo == 0) return true; // Storing addr, not into addr. 02377 MemoryUses.push_back(std::make_pair(SI, opNo)); 02378 continue; 02379 } 02380 02381 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 02382 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 02383 if (!IA) return true; 02384 02385 // If this is a memory operand, we're cool, otherwise bail out. 02386 if (!IsOperandAMemoryOperand(CI, IA, I, TLI)) 02387 return true; 02388 continue; 02389 } 02390 02391 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI)) 02392 return true; 02393 } 02394 02395 return false; 02396 } 02397 02398 /// ValueAlreadyLiveAtInst - Retrn true if Val is already known to be live at 02399 /// the use site that we're folding it into. If so, there is no cost to 02400 /// include it in the addressing mode. KnownLive1 and KnownLive2 are two values 02401 /// that we know are live at the instruction already. 02402 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 02403 Value *KnownLive2) { 02404 // If Val is either of the known-live values, we know it is live! 02405 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 02406 return true; 02407 02408 // All values other than instructions and arguments (e.g. constants) are live. 02409 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 02410 02411 // If Val is a constant sized alloca in the entry block, it is live, this is 02412 // true because it is just a reference to the stack/frame pointer, which is 02413 // live for the whole function. 02414 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 02415 if (AI->isStaticAlloca()) 02416 return true; 02417 02418 // Check to see if this value is already used in the memory instruction's 02419 // block. If so, it's already live into the block at the very least, so we 02420 // can reasonably fold it. 02421 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 02422 } 02423 02424 /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing 02425 /// mode of the machine to fold the specified instruction into a load or store 02426 /// that ultimately uses it. However, the specified instruction has multiple 02427 /// uses. 
Given this, it may actually increase register pressure to fold it 02428 /// into the load. For example, consider this code: 02429 /// 02430 /// X = ... 02431 /// Y = X+1 02432 /// use(Y) -> nonload/store 02433 /// Z = Y+1 02434 /// load Z 02435 /// 02436 /// In this case, Y has multiple uses, and can be folded into the load of Z 02437 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 02438 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 02439 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 02440 /// number of computations either. 02441 /// 02442 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 02443 /// X was live across 'load Z' for other reasons, we actually *would* want to 02444 /// fold the addressing mode in the Z case. This would make Y die earlier. 02445 bool AddressingModeMatcher:: 02446 IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 02447 ExtAddrMode &AMAfter) { 02448 if (IgnoreProfitability) return true; 02449 02450 // AMBefore is the addressing mode before this instruction was folded into it, 02451 // and AMAfter is the addressing mode after the instruction was folded. Get 02452 // the set of registers referenced by AMAfter and subtract out those 02453 // referenced by AMBefore: this is the set of values which folding in this 02454 // address extends the lifetime of. 02455 // 02456 // Note that there are only two potential values being referenced here, 02457 // BaseReg and ScaleReg (global addresses are always available, as are any 02458 // folded immediates). 02459 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 02460 02461 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 02462 // lifetime wasn't extended by adding this instruction. 02463 if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 02464 BaseReg = nullptr; 02465 if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 02466 ScaledReg = nullptr; 02467 02468 // If folding this instruction (and it's subexprs) didn't extend any live 02469 // ranges, we're ok with it. 02470 if (!BaseReg && !ScaledReg) 02471 return true; 02472 02473 // If all uses of this instruction are ultimately load/store/inlineasm's, 02474 // check to see if their addressing modes will include this instruction. If 02475 // so, we can fold it into all uses, so it doesn't matter if it has multiple 02476 // uses. 02477 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 02478 SmallPtrSet<Instruction*, 16> ConsideredInsts; 02479 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI)) 02480 return false; // Has a non-memory, non-foldable use! 02481 02482 // Now that we know that all uses of this instruction are part of a chain of 02483 // computation involving only operations that could theoretically be folded 02484 // into a memory use, loop over each of these uses and see if they could 02485 // *actually* fold the instruction. 02486 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 02487 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 02488 Instruction *User = MemoryUses[i].first; 02489 unsigned OpNo = MemoryUses[i].second; 02490 02491 // Get the access type of this use. If the use isn't a pointer, we don't 02492 // know what it accesses. 
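    // (OpNo was recorded by FindAllMemoryUses above and designates the address
    // operand of this load/store, so getOperand(OpNo) is the pointer whose
    // addressing mode we re-match below.)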
02493 Value *Address = User->getOperand(OpNo); 02494 if (!Address->getType()->isPointerTy()) 02495 return false; 02496 Type *AddressAccessTy = Address->getType()->getPointerElementType(); 02497 02498 // Do a match against the root of this address, ignoring profitability. This 02499 // will tell us if the addressing mode for the memory operation will 02500 // *actually* cover the shared instruction. 02501 ExtAddrMode Result; 02502 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 02503 TPT.getRestorationPoint(); 02504 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy, 02505 MemoryInst, Result, InsertedTruncs, 02506 PromotedInsts, TPT); 02507 Matcher.IgnoreProfitability = true; 02508 bool Success = Matcher.MatchAddr(Address, 0); 02509 (void)Success; assert(Success && "Couldn't select *anything*?"); 02510 02511 // The match was to check the profitability, the changes made are not 02512 // part of the original matcher. Therefore, they should be dropped 02513 // otherwise the original matcher will not present the right state. 02514 TPT.rollback(LastKnownGood); 02515 02516 // If the match didn't cover I, then it won't be shared by it. 02517 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(), 02518 I) == MatchedAddrModeInsts.end()) 02519 return false; 02520 02521 MatchedAddrModeInsts.clear(); 02522 } 02523 02524 return true; 02525 } 02526 02527 } // end anonymous namespace 02528 02529 /// IsNonLocalValue - Return true if the specified values are defined in a 02530 /// different basic block than BB. 02531 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 02532 if (Instruction *I = dyn_cast<Instruction>(V)) 02533 return I->getParent() != BB; 02534 return false; 02535 } 02536 02537 /// OptimizeMemoryInst - Load and Store Instructions often have 02538 /// addressing modes that can do significant amounts of computation. As such, 02539 /// instruction selection will try to get the load or store to do as much 02540 /// computation as possible for the program. The problem is that isel can only 02541 /// see within a single block. As such, we sink as much legal addressing mode 02542 /// stuff into the block as possible. 02543 /// 02544 /// This method is used to optimize both load/store and inline asms with memory 02545 /// operands. 02546 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 02547 Type *AccessTy) { 02548 Value *Repl = Addr; 02549 02550 // Try to collapse single-value PHI nodes. This is necessary to undo 02551 // unprofitable PRE transformations. 02552 SmallVector<Value*, 8> worklist; 02553 SmallPtrSet<Value*, 16> Visited; 02554 worklist.push_back(Addr); 02555 02556 // Use a worklist to iteratively look through PHI nodes, and ensure that 02557 // the addressing mode obtained from the non-PHI roots of the graph 02558 // are equivalent. 02559 Value *Consensus = nullptr; 02560 unsigned NumUsesConsensus = 0; 02561 bool IsNumUsesConsensusValid = false; 02562 SmallVector<Instruction*, 16> AddrModeInsts; 02563 ExtAddrMode AddrMode; 02564 TypePromotionTransaction TPT; 02565 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 02566 TPT.getRestorationPoint(); 02567 while (!worklist.empty()) { 02568 Value *V = worklist.back(); 02569 worklist.pop_back(); 02570 02571 // Break use-def graph loops. 02572 if (!Visited.insert(V)) { 02573 Consensus = nullptr; 02574 break; 02575 } 02576 02577 // For a PHI node, push all of its incoming values. 
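    // E.g. (illustrative) a PHI whose incoming values are identical
    // "getelementptr %base, 1" computations materialized in each predecessor:
    // every non-PHI root then yields the same ExtAddrMode, so the PHI can be
    // looked through and the address computation sunk once.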
02578 if (PHINode *P = dyn_cast<PHINode>(V)) { 02579 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) 02580 worklist.push_back(P->getIncomingValue(i)); 02581 continue; 02582 } 02583 02584 // For non-PHIs, determine the addressing mode being computed. 02585 SmallVector<Instruction*, 16> NewAddrModeInsts; 02586 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 02587 V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet, 02588 PromotedInsts, TPT); 02589 02590 // This check is broken into two cases with very similar code to avoid using 02591 // getNumUses() as much as possible. Some values have a lot of uses, so 02592 // calling getNumUses() unconditionally caused a significant compile-time 02593 // regression. 02594 if (!Consensus) { 02595 Consensus = V; 02596 AddrMode = NewAddrMode; 02597 AddrModeInsts = NewAddrModeInsts; 02598 continue; 02599 } else if (NewAddrMode == AddrMode) { 02600 if (!IsNumUsesConsensusValid) { 02601 NumUsesConsensus = Consensus->getNumUses(); 02602 IsNumUsesConsensusValid = true; 02603 } 02604 02605 // Ensure that the obtained addressing mode is equivalent to that obtained 02606 // for all other roots of the PHI traversal. Also, when choosing one 02607 // such root as representative, select the one with the most uses in order 02608 // to keep the cost modeling heuristics in AddressingModeMatcher 02609 // applicable. 02610 unsigned NumUses = V->getNumUses(); 02611 if (NumUses > NumUsesConsensus) { 02612 Consensus = V; 02613 NumUsesConsensus = NumUses; 02614 AddrModeInsts = NewAddrModeInsts; 02615 } 02616 continue; 02617 } 02618 02619 Consensus = nullptr; 02620 break; 02621 } 02622 02623 // If the addressing mode couldn't be determined, or if multiple different 02624 // ones were determined, bail out now. 02625 if (!Consensus) { 02626 TPT.rollback(LastKnownGood); 02627 return false; 02628 } 02629 TPT.commit(); 02630 02631 // Check to see if any of the instructions supersumed by this addr mode are 02632 // non-local to I's BB. 02633 bool AnyNonLocal = false; 02634 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) { 02635 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) { 02636 AnyNonLocal = true; 02637 break; 02638 } 02639 } 02640 02641 // If all the instructions matched are already in this BB, don't do anything. 02642 if (!AnyNonLocal) { 02643 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); 02644 return false; 02645 } 02646 02647 // Insert this computation right after this user. Since our caller is 02648 // scanning from the top of the BB to the bottom, reuse of the expr are 02649 // guaranteed to happen later. 02650 IRBuilder<> Builder(MemoryInst); 02651 02652 // Now that we determined the addressing expression we want to use and know 02653 // that we have to sink it into this block. Check to see if we have already 02654 // done this for some other load/store instr in this block. If so, reuse the 02655 // computation. 02656 Value *&SunkAddr = SunkAddrs[Addr]; 02657 if (SunkAddr) { 02658 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 02659 << *MemoryInst << "\n"); 02660 if (SunkAddr->getType() != Addr->getType()) 02661 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 02662 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && 02663 TM && TM->getSubtarget<TargetSubtargetInfo>().useAA())) { 02664 // By default, we use the GEP-based method when AA is used later. 
This 02665 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 02666 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 02667 << *MemoryInst << "\n"); 02668 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 02669 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 02670 02671 // First, find the pointer. 02672 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 02673 ResultPtr = AddrMode.BaseReg; 02674 AddrMode.BaseReg = nullptr; 02675 } 02676 02677 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 02678 // We can't add more than one pointer together, nor can we scale a 02679 // pointer (both of which seem meaningless). 02680 if (ResultPtr || AddrMode.Scale != 1) 02681 return false; 02682 02683 ResultPtr = AddrMode.ScaledReg; 02684 AddrMode.Scale = 0; 02685 } 02686 02687 if (AddrMode.BaseGV) { 02688 if (ResultPtr) 02689 return false; 02690 02691 ResultPtr = AddrMode.BaseGV; 02692 } 02693 02694 // If the real base value actually came from an inttoptr, then the matcher 02695 // will look through it and provide only the integer value. In that case, 02696 // use it here. 02697 if (!ResultPtr && AddrMode.BaseReg) { 02698 ResultPtr = 02699 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 02700 AddrMode.BaseReg = nullptr; 02701 } else if (!ResultPtr && AddrMode.Scale == 1) { 02702 ResultPtr = 02703 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 02704 AddrMode.Scale = 0; 02705 } 02706 02707 if (!ResultPtr && 02708 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 02709 SunkAddr = Constant::getNullValue(Addr->getType()); 02710 } else if (!ResultPtr) { 02711 return false; 02712 } else { 02713 Type *I8PtrTy = 02714 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 02715 02716 // Start with the base register. Do this first so that subsequent address 02717 // matching finds it last, which will prevent it from trying to match it 02718 // as the scaled value in case it happens to be a mul. That would be 02719 // problematic if we've sunk a different mul for the scale, because then 02720 // we'd end up sinking both muls. 02721 if (AddrMode.BaseReg) { 02722 Value *V = AddrMode.BaseReg; 02723 if (V->getType() != IntPtrTy) 02724 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 02725 02726 ResultIndex = V; 02727 } 02728 02729 // Add the scale value. 02730 if (AddrMode.Scale) { 02731 Value *V = AddrMode.ScaledReg; 02732 if (V->getType() == IntPtrTy) { 02733 // done. 02734 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 02735 cast<IntegerType>(V->getType())->getBitWidth()) { 02736 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 02737 } else { 02738 // It is only safe to sign extend the BaseReg if we know that the math 02739 // required to create it did not overflow before we extend it. Since 02740 // the original IR value was tossed in favor of a constant back when 02741 // the AddrMode was created we need to bail out gracefully if widths 02742 // do not match instead of extending it. 
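        // (Before giving up, delete the cast we may already have emitted for
        // the base register so that no dead "sunkaddr" instruction is left
        // behind; ResultIndex == AddrMode.BaseReg means nothing new was
        // created.)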
02743 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 02744 if (I && (ResultIndex != AddrMode.BaseReg)) 02745 I->eraseFromParent(); 02746 return false; 02747 } 02748 02749 if (AddrMode.Scale != 1) 02750 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 02751 "sunkaddr"); 02752 if (ResultIndex) 02753 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 02754 else 02755 ResultIndex = V; 02756 } 02757 02758 // Add in the Base Offset if present. 02759 if (AddrMode.BaseOffs) { 02760 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 02761 if (ResultIndex) { 02762 // We need to add this separately from the scale above to help with 02763 // SDAG consecutive load/store merging. 02764 if (ResultPtr->getType() != I8PtrTy) 02765 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 02766 ResultPtr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); 02767 } 02768 02769 ResultIndex = V; 02770 } 02771 02772 if (!ResultIndex) { 02773 SunkAddr = ResultPtr; 02774 } else { 02775 if (ResultPtr->getType() != I8PtrTy) 02776 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 02777 SunkAddr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); 02778 } 02779 02780 if (SunkAddr->getType() != Addr->getType()) 02781 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 02782 } 02783 } else { 02784 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 02785 << *MemoryInst << "\n"); 02786 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 02787 Value *Result = nullptr; 02788 02789 // Start with the base register. Do this first so that subsequent address 02790 // matching finds it last, which will prevent it from trying to match it 02791 // as the scaled value in case it happens to be a mul. That would be 02792 // problematic if we've sunk a different mul for the scale, because then 02793 // we'd end up sinking both muls. 02794 if (AddrMode.BaseReg) { 02795 Value *V = AddrMode.BaseReg; 02796 if (V->getType()->isPointerTy()) 02797 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 02798 if (V->getType() != IntPtrTy) 02799 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 02800 Result = V; 02801 } 02802 02803 // Add the scale value. 02804 if (AddrMode.Scale) { 02805 Value *V = AddrMode.ScaledReg; 02806 if (V->getType() == IntPtrTy) { 02807 // done. 02808 } else if (V->getType()->isPointerTy()) { 02809 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 02810 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 02811 cast<IntegerType>(V->getType())->getBitWidth()) { 02812 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 02813 } else { 02814 // It is only safe to sign extend the BaseReg if we know that the math 02815 // required to create it did not overflow before we extend it. Since 02816 // the original IR value was tossed in favor of a constant back when 02817 // the AddrMode was created we need to bail out gracefully if widths 02818 // do not match instead of extending it. 02819 Instruction *I = dyn_cast_or_null<Instruction>(Result); 02820 if (I && (Result != AddrMode.BaseReg)) 02821 I->eraseFromParent(); 02822 return false; 02823 } 02824 if (AddrMode.Scale != 1) 02825 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 02826 "sunkaddr"); 02827 if (Result) 02828 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 02829 else 02830 Result = V; 02831 } 02832 02833 // Add in the BaseGV if present. 
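    // (On this fallback path the whole address is rebuilt as integer
    // arithmetic; roughly, and only as an illustration:
    //   sunkaddr = inttoptr(cast(BaseReg) + ScaledReg*Scale
    //                       + ptrtoint(BaseGV) + BaseOffs).)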
02834 if (AddrMode.BaseGV) { 02835 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 02836 if (Result) 02837 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 02838 else 02839 Result = V; 02840 } 02841 02842 // Add in the Base Offset if present. 02843 if (AddrMode.BaseOffs) { 02844 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 02845 if (Result) 02846 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 02847 else 02848 Result = V; 02849 } 02850 02851 if (!Result) 02852 SunkAddr = Constant::getNullValue(Addr->getType()); 02853 else 02854 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 02855 } 02856 02857 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 02858 02859 // If we have no uses, recursively delete the value and all dead instructions 02860 // using it. 02861 if (Repl->use_empty()) { 02862 // This can cause recursive deletion, which can invalidate our iterator. 02863 // Use a WeakVH to hold onto it in case this happens. 02864 WeakVH IterHandle(CurInstIterator); 02865 BasicBlock *BB = CurInstIterator->getParent(); 02866 02867 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 02868 02869 if (IterHandle != CurInstIterator) { 02870 // If the iterator instruction was recursively deleted, start over at the 02871 // start of the block. 02872 CurInstIterator = BB->begin(); 02873 SunkAddrs.clear(); 02874 } 02875 } 02876 ++NumMemoryInsts; 02877 return true; 02878 } 02879 02880 /// OptimizeInlineAsmInst - If there are any memory operands, use 02881 /// OptimizeMemoryInst to sink their address computing into the block when 02882 /// possible / profitable. 02883 bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) { 02884 bool MadeChange = false; 02885 02886 TargetLowering::AsmOperandInfoVector 02887 TargetConstraints = TLI->ParseConstraints(CS); 02888 unsigned ArgNo = 0; 02889 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 02890 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 02891 02892 // Compute the constraint code and ConstraintType to use. 02893 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 02894 02895 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 02896 OpInfo.isIndirect) { 02897 Value *OpVal = CS->getArgOperand(ArgNo++); 02898 MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType()); 02899 } else if (OpInfo.Type == InlineAsm::isInput) 02900 ArgNo++; 02901 } 02902 02903 return MadeChange; 02904 } 02905 02906 /// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same 02907 /// basic block as the load, unless conditions are unfavorable. This allows 02908 /// SelectionDAG to fold the extend into the load. 02909 /// 02910 bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) { 02911 // Look for a load being extended. 02912 LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0)); 02913 if (!LI) return false; 02914 02915 // If they're already in the same block, there's nothing to do. 02916 if (LI->getParent() == I->getParent()) 02917 return false; 02918 02919 // If the load has other users and the truncate is not free, this probably 02920 // isn't worthwhile. 02921 if (!LI->hasOneUse() && 02922 TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) || 02923 !TLI->isTypeLegal(TLI->getValueType(I->getType()))) && 02924 !TLI->isTruncateFree(I->getType(), LI->getType())) 02925 return false; 02926 02927 // Check whether the target supports casts folded into loads. 
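  // For instance (illustrative IR), when the move below happens,
  //   %v = load i16* %p             ; in the defining block
  //   ...
  //   %e = sext i16 %v to i32       ; originally in a later block
  // ends up with the sext immediately after the load, which SelectionDAG can
  // then match as a single extending load (ISD::SEXTLOAD here, ISD::ZEXTLOAD
  // for a zext) when the target reports it legal.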
02928 unsigned LType; 02929 if (isa<ZExtInst>(I)) 02930 LType = ISD::ZEXTLOAD; 02931 else { 02932 assert(isa<SExtInst>(I) && "Unexpected ext type!"); 02933 LType = ISD::SEXTLOAD; 02934 } 02935 if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType()))) 02936 return false; 02937 02938 // Move the extend into the same block as the load, so that SelectionDAG 02939 // can fold it. 02940 I->removeFromParent(); 02941 I->insertAfter(LI); 02942 ++NumExtsMoved; 02943 return true; 02944 } 02945 02946 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) { 02947 BasicBlock *DefBB = I->getParent(); 02948 02949 // If the result of a {s|z}ext and its source are both live out, rewrite all 02950 // other uses of the source with result of extension. 02951 Value *Src = I->getOperand(0); 02952 if (Src->hasOneUse()) 02953 return false; 02954 02955 // Only do this xform if truncating is free. 02956 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 02957 return false; 02958 02959 // Only safe to perform the optimization if the source is also defined in 02960 // this block. 02961 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 02962 return false; 02963 02964 bool DefIsLiveOut = false; 02965 for (User *U : I->users()) { 02966 Instruction *UI = cast<Instruction>(U); 02967 02968 // Figure out which BB this ext is used in. 02969 BasicBlock *UserBB = UI->getParent(); 02970 if (UserBB == DefBB) continue; 02971 DefIsLiveOut = true; 02972 break; 02973 } 02974 if (!DefIsLiveOut) 02975 return false; 02976 02977 // Make sure none of the uses are PHI nodes. 02978 for (User *U : Src->users()) { 02979 Instruction *UI = cast<Instruction>(U); 02980 BasicBlock *UserBB = UI->getParent(); 02981 if (UserBB == DefBB) continue; 02982 // Be conservative. We don't want this xform to end up introducing 02983 // reloads just before load / store instructions. 02984 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 02985 return false; 02986 } 02987 02988 // InsertedTruncs - Only insert one trunc in each block once. 02989 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 02990 02991 bool MadeChange = false; 02992 for (Use &U : Src->uses()) { 02993 Instruction *User = cast<Instruction>(U.getUser()); 02994 02995 // Figure out which BB this ext is used in. 02996 BasicBlock *UserBB = User->getParent(); 02997 if (UserBB == DefBB) continue; 02998 02999 // Both src and def are live in this block. Rewrite the use. 03000 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 03001 03002 if (!InsertedTrunc) { 03003 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 03004 InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt); 03005 InsertedTruncsSet.insert(InsertedTrunc); 03006 } 03007 03008 // Replace a use of the {s|z}ext source with a use of the result. 03009 U = InsertedTrunc; 03010 ++NumExtUses; 03011 MadeChange = true; 03012 } 03013 03014 return MadeChange; 03015 } 03016 03017 /// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be 03018 /// turned into an explicit branch. 03019 static bool isFormingBranchFromSelectProfitable(SelectInst *SI) { 03020 // FIXME: This should use the same heuristics as IfConversion to determine 03021 // whether a select is better represented as a branch. This requires that 03022 // branch probability metadata is preserved for the select, which is not the 03023 // case currently. 
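// Illustrative IR (invented names) of the shape the checks below treat as
// profitable: a compare whose only use is the select and whose operand is a
// single-use load, e.g.
//   %v = load i32* %p                    ; single use
//   %c = icmp eq i32 %v, 0               ; single use
//   %r = select i1 %c, i32 %a, i32 %b
// Branching here keeps a well-predicted branch from stalling on the load,
// whereas a cmov/select would have to wait for the loaded value.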
03024 03025 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 03026 03027 // If the branch is predicted right, an out of order CPU can avoid blocking on 03028 // the compare. Emit cmovs on compares with a memory operand as branches to 03029 // avoid stalls on the load from memory. If the compare has more than one use 03030 // there's probably another cmov or setcc around so it's not worth emitting a 03031 // branch. 03032 if (!Cmp) 03033 return false; 03034 03035 Value *CmpOp0 = Cmp->getOperand(0); 03036 Value *CmpOp1 = Cmp->getOperand(1); 03037 03038 // We check that the memory operand has one use to avoid uses of the loaded 03039 // value directly after the compare, making branches unprofitable. 03040 return Cmp->hasOneUse() && 03041 ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) || 03042 (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse())); 03043 } 03044 03045 03046 /// If we have a SelectInst that will likely profit from branch prediction, 03047 /// turn it into a branch. 03048 bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) { 03049 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 03050 03051 // Can we convert the 'select' to CF ? 03052 if (DisableSelectToBranch || OptSize || !TLI || VectorCond) 03053 return false; 03054 03055 TargetLowering::SelectSupportKind SelectKind; 03056 if (VectorCond) 03057 SelectKind = TargetLowering::VectorMaskSelect; 03058 else if (SI->getType()->isVectorTy()) 03059 SelectKind = TargetLowering::ScalarCondVectorVal; 03060 else 03061 SelectKind = TargetLowering::ScalarValSelect; 03062 03063 // Do we have efficient codegen support for this kind of 'selects' ? 03064 if (TLI->isSelectSupported(SelectKind)) { 03065 // We have efficient codegen support for the select instruction. 03066 // Check if it is profitable to keep this 'select'. 03067 if (!TLI->isPredictableSelectExpensive() || 03068 !isFormingBranchFromSelectProfitable(SI)) 03069 return false; 03070 } 03071 03072 ModifiedDT = true; 03073 03074 // First, we split the block containing the select into 2 blocks. 03075 BasicBlock *StartBlock = SI->getParent(); 03076 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI)); 03077 BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 03078 03079 // Create a new block serving as the landing pad for the branch. 03080 BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid", 03081 NextBlock->getParent(), NextBlock); 03082 03083 // Move the unconditional branch from the block with the select in it into our 03084 // landing pad block. 03085 StartBlock->getTerminator()->eraseFromParent(); 03086 BranchInst::Create(NextBlock, SmallBlock); 03087 03088 // Insert the real conditional branch based on the original condition. 03089 BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI); 03090 03091 // The select itself is replaced with a PHI Node. 03092 PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin()); 03093 PN->takeName(SI); 03094 PN->addIncoming(SI->getTrueValue(), StartBlock); 03095 PN->addIncoming(SI->getFalseValue(), SmallBlock); 03096 SI->replaceAllUsesWith(PN); 03097 SI->eraseFromParent(); 03098 03099 // Instruct OptimizeBlock to skip to the next block. 
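// For reference, an illustrative before/after (invented names): a block
// that contained
//   %r = select i1 %c, i32 %t, i32 %f
// has at this point been rewritten into roughly
//   entry:                                      ; StartBlock
//     br i1 %c, label %select.end, label %select.mid
//   select.mid:                                 ; SmallBlock
//     br label %select.end
//   select.end:                                 ; NextBlock
//     %r = phi i32 [ %t, %entry ], [ %f, %select.mid ]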
03100 CurInstIterator = StartBlock->end(); 03101 ++NumSelectsExpanded; 03102 return true; 03103 } 03104 03105 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 03106 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 03107 int SplatElem = -1; 03108 for (unsigned i = 0; i < Mask.size(); ++i) { 03109 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 03110 return false; 03111 SplatElem = Mask[i]; 03112 } 03113 03114 return true; 03115 } 03116 03117 /// Some targets have expensive vector shifts if the lanes aren't all the same 03118 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 03119 /// it's often worth sinking a shufflevector splat down to its use so that 03120 /// codegen can spot all lanes are identical. 03121 bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 03122 BasicBlock *DefBB = SVI->getParent(); 03123 03124 // Only do this xform if variable vector shifts are particularly expensive. 03125 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 03126 return false; 03127 03128 // We only expect better codegen by sinking a shuffle if we can recognise a 03129 // constant splat. 03130 if (!isBroadcastShuffle(SVI)) 03131 return false; 03132 03133 // InsertedShuffles - Only insert a shuffle in each block once. 03134 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 03135 03136 bool MadeChange = false; 03137 for (User *U : SVI->users()) { 03138 Instruction *UI = cast<Instruction>(U); 03139 03140 // Figure out which BB this ext is used in. 03141 BasicBlock *UserBB = UI->getParent(); 03142 if (UserBB == DefBB) continue; 03143 03144 // For now only apply this when the splat is used by a shift instruction. 03145 if (!UI->isShift()) continue; 03146 03147 // Everything checks out, sink the shuffle if the user's block doesn't 03148 // already have a copy. 03149 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 03150 03151 if (!InsertedShuffle) { 03152 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 03153 InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0), 03154 SVI->getOperand(1), 03155 SVI->getOperand(2), "", InsertPt); 03156 } 03157 03158 UI->replaceUsesOfWith(SVI, InsertedShuffle); 03159 MadeChange = true; 03160 } 03161 03162 // If we removed all uses, nuke the shuffle. 03163 if (SVI->use_empty()) { 03164 SVI->eraseFromParent(); 03165 MadeChange = true; 03166 } 03167 03168 return MadeChange; 03169 } 03170 03171 bool CodeGenPrepare::OptimizeInst(Instruction *I) { 03172 if (PHINode *P = dyn_cast<PHINode>(I)) { 03173 // It is possible for very late stage optimizations (such as SimplifyCFG) 03174 // to introduce PHI nodes too late to be cleaned up. If we detect such a 03175 // trivial PHI, go ahead and zap it here. 03176 if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr, 03177 TLInfo, DT)) { 03178 P->replaceAllUsesWith(V); 03179 P->eraseFromParent(); 03180 ++NumPHIsElim; 03181 return true; 03182 } 03183 return false; 03184 } 03185 03186 if (CastInst *CI = dyn_cast<CastInst>(I)) { 03187 // If the source of the cast is a constant, then this should have 03188 // already been constant folded. The only reason NOT to constant fold 03189 // it is if something (e.g. LSR) was careful to place the constant 03190 // evaluation in a block other than then one that uses it (e.g. to hoist 03191 // the address of globals out of a loop). If this is the case, we don't 03192 // want to forward-subst the cast. 
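// Illustrative example (invented names): LSR may deliberately have hoisted
//   preheader:
//     %p = bitcast [16 x i32]* @table to i32*
//   loop:
//     ... uses of %p ...
// out of the loop. Forward-substituting the cast back into every using
// block would undo that placement, so casts of constants are skipped here.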
03193 if (isa<Constant>(CI->getOperand(0)))
03194 return false;
03195 
03196 if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
03197 return true;
03198 
03199 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
03200 /// Sink a zext or sext into its user blocks if the target type doesn't
03201 /// fit in one register.
03202 if (TLI && TLI->getTypeAction(CI->getContext(),
03203 TLI->getValueType(CI->getType())) ==
03204 TargetLowering::TypeExpandInteger) {
03205 return SinkCast(CI);
03206 } else {
03207 bool MadeChange = MoveExtToFormExtLoad(I);
03208 return MadeChange | OptimizeExtUses(I);
03209 }
03210 }
03211 return false;
03212 }
03213 
03214 if (CmpInst *CI = dyn_cast<CmpInst>(I))
03215 if (!TLI || !TLI->hasMultipleConditionRegisters())
03216 return OptimizeCmpExpression(CI);
03217 
03218 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
03219 if (TLI)
03220 return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
03221 return false;
03222 }
03223 
03224 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
03225 if (TLI)
03226 return OptimizeMemoryInst(I, SI->getOperand(1),
03227 SI->getOperand(0)->getType());
03228 return false;
03229 }
03230 
03231 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
03232 
03233 if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
03234 BinOp->getOpcode() == Instruction::LShr)) {
03235 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
03236 if (TLI && CI && TLI->hasExtractBitsInsn())
03237 return OptimizeExtractBits(BinOp, CI, *TLI);
03238 
03239 return false;
03240 }
03241 
03242 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
03243 if (GEPI->hasAllZeroIndices()) {
03244 /// The GEP operand must be a pointer, so must its result -> BitCast
03245 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
03246 GEPI->getName(), GEPI);
03247 GEPI->replaceAllUsesWith(NC);
03248 GEPI->eraseFromParent();
03249 ++NumGEPsElim;
03250 OptimizeInst(NC);
03251 return true;
03252 }
03253 return false;
03254 }
03255 
03256 if (CallInst *CI = dyn_cast<CallInst>(I))
03257 return OptimizeCallInst(CI);
03258 
03259 if (SelectInst *SI = dyn_cast<SelectInst>(I))
03260 return OptimizeSelectInst(SI);
03261 
03262 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
03263 return OptimizeShuffleVectorInst(SVI);
03264 
03265 return false;
03266 }
03267 
03268 // In this pass we look for GEP and cast instructions that are used
03269 // across basic blocks and rewrite them to improve basic-block-at-a-time
03270 // selection.
03271 bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
03272 SunkAddrs.clear();
03273 bool MadeChange = false;
03274 
03275 CurInstIterator = BB.begin();
03276 while (CurInstIterator != BB.end())
03277 MadeChange |= OptimizeInst(CurInstIterator++);
03278 
03279 MadeChange |= DupRetToEnableTailCallOpts(&BB);
03280 
03281 return MadeChange;
03282 }
03283 
03284 // If an llvm.dbg.value is far away from the value it describes, ISel may
03285 // not be able to handle it properly. ISel will drop the llvm.dbg.value if
03286 // it cannot find a node corresponding to the value.
03287 bool CodeGenPrepare::PlaceDbgValues(Function &F) {
03288 bool MadeChange = false;
03289 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
03290 Instruction *PrevNonDbgInst = nullptr;
03291 for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
03292 Instruction *Insn = BI; ++BI;
03293 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
03294 // Leave dbg.values that refer to an alloca alone. These
03295 // intrinsics describe the address of a variable (= the alloca)
03296 // being taken. They should not be moved next to the alloca
03297 // (and to the beginning of the scope), but rather stay close to
03298 // where said address is used.
03299 if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
03300 PrevNonDbgInst = Insn;
03301 continue;
03302 }
03303 
03304 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
03305 if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
03306 DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
03307 DVI->removeFromParent();
03308 if (isa<PHINode>(VI))
03309 DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
03310 else
03311 DVI->insertAfter(VI);
03312 MadeChange = true;
03313 ++NumDbgValueMoved;
03314 }
03315 }
03316 }
03317 return MadeChange;
03318 }
03319 
03320 // If a block ends with a branch on a comparison of a single bit against
03321 // zero, and the target supports folding the mask and compare into the
03322 // branch, sink copies of the mask and compare down to the conditional
03323 // branches that use them. Do this before OptimizeBlock ->
03324 // OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern
03325 // being searched for.
03326 bool CodeGenPrepare::sinkAndCmp(Function &F) {
03327 if (!EnableAndCmpSinking)
03328 return false;
03329 if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
03330 return false;
03331 bool MadeChange = false;
03332 for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
03333 BasicBlock *BB = I++;
03334 
03335 // Does this BB end with the following?
03336 //   %andVal = and %val, #single-bit-set
03337 //   %icmpVal = icmp %andVal, 0
03338 //   br i1 %icmpVal, label %dest1, label %dest2
03339 BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
03340 if (!Brcc || !Brcc->isConditional())
03341 continue;
03342 ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
03343 if (!Cmp || Cmp->getParent() != BB)
03344 continue;
03345 ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
03346 if (!Zero || !Zero->isZero())
03347 continue;
03348 Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
03349 if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
03350 continue;
03351 ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
03352 if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
03353 continue;
03354 DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());
03355 
03356 // Push the "and; icmp" for any users that are conditional branches.
03357 // Since there can only be one branch use per BB, we don't need to keep
03358 // track of which BBs we insert into.
03359 for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
03360 UI != E; ) {
03361 Use &TheUse = *UI;
03362 // Find brcc use.
03363 BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
03364 ++UI;
03365 if (!BrccUser || !BrccUser->isConditional())
03366 continue;
03367 BasicBlock *UserBB = BrccUser->getParent();
03368 if (UserBB == BB) continue;
03369 DEBUG(dbgs() << "found Brcc use\n");
03370 
03371 // Sink the "and; icmp" to the use.
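// Illustrative result (invented names and mask): for a defining block ending
// in
//   %andVal = and i32 %val, 8
//   %icmpVal = icmp eq i32 %andVal, 0
//   br i1 %icmpVal, label %dest1, label %dest2
// every conditional branch in another block that also uses %icmpVal gets a
// local copy of the and/icmp pair feeding its own branch, e.g.
//   %andVal1 = and i32 %val, 8
//   %icmpVal1 = icmp eq i32 %andVal1, 0
//   br i1 %icmpVal1, label %d3, label %d4
// so the target can fold each mask/compare/branch triple into one
// instruction.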
03372 MadeChange = true; 03373 BinaryOperator *NewAnd = 03374 BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "", 03375 BrccUser); 03376 CmpInst *NewCmp = 03377 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero, 03378 "", BrccUser); 03379 TheUse = NewCmp; 03380 ++NumAndCmpsMoved; 03381 DEBUG(BrccUser->getParent()->dump()); 03382 } 03383 } 03384 return MadeChange; 03385 }
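// Illustration for PlaceDbgValues above (invented and abbreviated IR): given
//   %x = add i32 %a, %b
//   ...                                          ; unrelated instructions
//   call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !10)
// the dbg.value call is moved to sit immediately after %x (or after the
// block's first insertion point when %x is a PHI), so instruction selection
// can still find a node for %x when it lowers the dbg.value instead of
// dropping it.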