//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simpler
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm-c/Initialization.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");

static cl::opt<bool>
EnableUnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
                     cl::init(false),
                     cl::desc("Enable unsafe double to float "
                              "shrinking for math lib calls"));
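// For illustration (an editor's sketch, not part of the original file), the
// canonicalizations above rewrite IR such as
//    %a = add i32 1, %x             ; rule 1: constant belongs on the RHS
//    %m = mul i32 %x, 8             ; rule 6: power-of-two multiply
// into
//    %a = add i32 %x, 1
//    %m = shl i32 %x, 3
// The exact output depends on which rules fire for the surrounding code.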
// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AssumptionTracker>();
  AU.addRequired<TargetLibraryInfo>();
}


Value *InstCombiner::EmitGEPOffset(User *GEP) {
  return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
}

/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have DL, we don't know if the source/dest are legal.
  if (!DL) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = DL->isLegalInteger(FromWidth);
  bool ToLegal = DL->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

// Return true, if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow.  This function only handles the Add and Sub opcodes.  For
// all other opcodes, the function conservatively returns false.
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}
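// For illustration (an editor's sketch, not part of the original file): when
// reassociating
//    %t = add nsw i32 %x, 100
//    %r = add nsw i32 %t, 27
// into %r = add nsw i32 %x, 127, the nsw flag may be kept because 100 + 27
// does not overflow a signed 32-bit integer, so MaintainNoSignedWrap returns
// true for the folded constant operation.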
/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation.  Fast-math flags are the exception: they are preserved when
/// it is safe to do so.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
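// For illustration (an editor's sketch, not part of the original file):
// transform 2 rewrites
//    %t = add i32 %x, 1
//    %r = add i32 %t, 7        ; "(A op B) op C" with B == 1, C == 7
// into
//    %r = add i32 %x, 8        ; "A op (B op C)"
// because SimplifyBinOp can fold the two constants into one.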
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            ClearSubclassDataAfterReassociation(I);
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        if (isa<FPMathOperator>(New)) {
          FastMathFlags Flags = I.getFastMathFlags();
          Flags &= Op0->getFastMathFlags();
          Flags &= Op1->getFastMathFlags();
          New->setFastMathFlags(Flags);
        }
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}
/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);

  switch (LOp) {
  default:
    return false;
  // (X >> Z) & (Y >> Z)  -> (X&Y) >> Z  for all shifts.
  // (X >> Z) | (Y >> Z)  -> (X|Y) >> Z  for all shifts.
  // (X >> Z) ^ (Y >> Z)  -> (X^Y) >> Z  for all shifts.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    switch (ROp) {
    default:
      return false;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
      return true;
    }
  }
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// This function returns the identity value for the given opcode, which can
/// be used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1)
/// ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps OpCode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  if (OpCode == Instruction::Mul)
    return ConstantInt::get(V->getType(), 1);

  // TODO: We can handle other cases e.g. Instruction::And, Instruction::Or
  // etc.

  return nullptr;
}

/// This function factors binary ops which can be combined using distributive
/// laws.  It rewrites 'Op' based on TopLevelOpcode to enable factorization.
/// For example, for ADD(SHL(X, 2), MUL(X, 5)), when called with
/// TopLevelOpcode == Instruction::Add and Op = SHL(X, 2), it views
/// SHL(X, 2) as MUL(X, 4), i.e. it returns Instruction::Mul with LHS set to
/// 'X' and RHS set to 4.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
                          BinaryOperator *Op, Value *&LHS, Value *&RHS) {
  if (!Op)
    return Instruction::BinaryOpsEnd;

  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);

  switch (TopLevelOpcode) {
  default:
    return Op->getOpcode();

  case Instruction::Add:
  case Instruction::Sub:
    if (Op->getOpcode() == Instruction::Shl) {
      if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
        // The multiplier is really 1 << CST.
        RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
        return Instruction::Mul;
      }
    }
    return Op->getOpcode();
  }

  // TODO: We can add other conversions e.g. shr => div etc.
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
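// For illustration (an editor's sketch, not part of the original file):
// viewing the shift as a multiply lets a common factor be pulled out:
//    %s = shl i32 %x, 2            ; treated as mul i32 %x, 4
//    %m = mul i32 %x, 5
//    %r = add i32 %s, %m
// factorizes to
//    %r = mul i32 %x, 9            ; X*4 + X*5 ==> X*(4+5)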
static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
                               const DataLayout *DL, BinaryOperator &I,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {

  // If any of A, B, C, D are null, we cannot factor I, return early.
  if (!A || !C || !B || !D)
    return nullptr;

  Value *SimplifiedInst = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
      // If "B op D" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V) {
        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
      }
    }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);

      // If "A op C" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V) {
        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
      }
    }

  if (SimplifiedInst) {
    ++NumFactor;
    SimplifiedInst->takeName(&I);

    // Check if we can add NSW flag to SimplifiedInst.  If so, set NSW flag.
    // TODO: Check for NUW.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
      if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
        bool HasNSW = false;
        if (isa<OverflowingBinaryOperator>(&I))
          HasNSW = I.hasNoSignedWrap();

        if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
          if (isa<OverflowingBinaryOperator>(Op0))
            HasNSW &= Op0->hasNoSignedWrap();

        if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
          if (isa<OverflowingBinaryOperator>(Op1))
            HasNSW &= Op1->hasNoSignedWrap();
        BO->setHasNoSignedWrap(HasNSW);
      }
    }
  }
  return SimplifiedInst;
}
/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if
/// this results in simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if
/// this is a win).  Returns the simplified value, or null if it didn't
/// simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Factorization.
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
  auto TopLevelOpcode = I.getOpcode();
  auto LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
  auto RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);

  // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
  // a common term.
  if (LHSOpcode == RHSOpcode) {
    if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, C, D))
      return V;
  }

  // The instruction has the form "(A op' B) op (C)".  Try to factorize a
  // common term.
  if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, RHS,
                                  getIdentityValue(LHSOpcode, RHS)))
    return V;

  // The instruction has the form "(B) op (C op' D)".  Try to factorize a
  // common term.
  if (Value *V = tryFactorization(Builder, DL, I, RHSOpcode, LHS,
                                  getIdentityValue(RHSOpcode, LHS), C, D))
    return V;

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return nullptr;
}
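// For illustration (an editor's sketch, not part of the original file):
// expansion only fires when both expanded halves fold.  For example, in
//    %n = xor i32 %x, -1            ; ~x
//    %r = and i32 %x, %n            ; "A op (B op' C)" with A == B == %x
// both halves simplify: %x & %x -> %x and %x & -1 -> %x, so the expansion
// becomes %x ^ %x, which folds to 0 (the classic "x & ~x == 0").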
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the
// instruction if the LHS is a constant zero (which is the 'negate' form).
//
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return nullptr;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
  if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return nullptr;
}
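// For illustration (an editor's sketch, not part of the original file):
//    %n = sub i32 0, %x             ; dyn_castNegVal(%n) returns %x
//    %f = fsub float -0.0, %y       ; dyn_castFNegVal(%f) returns %y
// A plain constant such as i32 5 is also accepted and folds to i32 -5.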
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) {
    Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                         SO->getName()+".op");
    Instruction *FPInst = dyn_cast<Instruction>(RI);
    if (FPInst && isa<FPMathOperator>(FPInst))
      FPInst->copyFastMathFlags(BO);
    return RI;
  }
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}
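// For illustration (an editor's sketch, not part of the original file):
//    %s = select i1 %c, i32 1, i32 2
//    %r = add i32 %s, 4
// folds the add into both select arms, giving
//    %r = select i1 %c, i32 5, i32 6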
// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously
// do not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return nullptr;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return nullptr;

    // If it's a bitcast involving vectors, make sure it has the same number
    // of elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return nullptr;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return nullptr;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
    if (NonConstBB) return nullptr;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return nullptr;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return nullptr;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would
  // be inserting the computation on some other paths (e.g. inside a loop).
  // Only do this if the pred block is unconditionally branching into the phi
  // block.
  if (NonConstBB != nullptr) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return nullptr;
  }
  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = nullptr;
      // Beware of ConstantExpr: it may eventually evaluate to getNullValue,
      // even if currently isNullValue gives false.
      Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
      if (InC && !isa<ConstantExpr>(InC))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(),
                                  "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}
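// For illustration (an editor's sketch, not part of the original file):
//    %p = phi i32 [ 3, %bb1 ], [ 5, %bb2 ]
//    %r = mul i32 %p, 2
// folds the multiply into the incoming values:
//    %r = phi i32 [ 6, %bb1 ], [ 10, %bb2 ]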
/// FindElementAtOffset - Given a pointer type and a constant offset,
/// determine whether or not there is a sequence of GEP indices into the
/// pointed type that will land us at the specified offset.  If so, fill them
/// into NewIndices and return the resultant element type, otherwise return
/// null.
Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  assert(PtrTy->isPtrOrPtrVectorTy());

  if (!DL)
    return nullptr;

  Type *Ty = PtrTy->getPointerElementType();
  if (!Ty->isSized())
    return nullptr;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}].
  Type *IntPtrTy = DL->getIntPtrType(PtrTy);
  int64_t FirstIdx = 0;
  if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, return null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
      return nullptr;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return nullptr;
    }
  }

  return Ty;
}
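// For illustration (an editor's sketch, not part of the original file):
// given %struct.S = type { i32, [4 x i16] } and Offset == 10, the indices
// produced are [0, 1, 3]: byte 10 falls in field 1 (the array, at byte
// offset 4) at element (10 - 4) / 2 == 3, and the returned type is i16.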
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

/// Descale - Return a value X such that Val = X * Scale, or null if none.  If
/// the multiplication is known not to overflow then NoSignedWrap is set.
Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
  assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
         Scale.getBitWidth() && "Scale not compatible with value!");

  // If Val is zero or Scale is one then Val = Val * Scale.
  if (match(Val, m_Zero()) || Scale == 1) {
    NoSignedWrap = true;
    return Val;
  }

  // If Scale is zero then it does not divide Val.
  if (Scale.isMinValue())
    return nullptr;

  // Look through chains of multiplications, searching for a constant that is
  // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
  // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8)
  // by a factor of 4 will produce X*(Y*2).  The principle of operation is to
  // bore down from Val:
  //
  //     Val = M1 * X    ||   Analysis starts here and works down
  //      M1 = M2 * Y    ||   Doesn't descend into terms with more
  //      M2 =  Z * 4    \/   than one use
  //
  // Then to modify a term at the bottom:
  //
  //     Val = M1 * X
  //      M1 =  Z * Y    ||   Replaced M2 with Z
  //
  // Then to work back up correcting nsw flags.

  // Op - the term we are currently analyzing.  Starts at Val then drills
  // down.  Replaced with its descaled value before exiting from the drill
  // down loop.
  Value *Op = Val;

  // Parent - initially null, but after drilling down notes where Op came
  // from.  In the example above, Parent is (Val, 0) when Op is M1, because
  // M1 is the 0'th operand of Val.
  std::pair<Instruction*, unsigned> Parent;

  // RequireNoSignedWrap - Set if the transform requires a descaling at deeper
  // levels that doesn't overflow.
  bool RequireNoSignedWrap = false;

  // logScale - log base 2 of the scale.  Negative if not a power of 2.
  int32_t logScale = Scale.exactLogBase2();

  for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down

    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
      // If Op is a constant divisible by Scale then descale to the quotient.
      APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
      APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
      if (!Remainder.isMinValue())
        // Not divisible by Scale.
        return nullptr;
      // Replace with the quotient in the parent.
      Op = ConstantInt::get(CI->getType(), Quotient);
      NoSignedWrap = true;
      break;
    }
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {

      if (BO->getOpcode() == Instruction::Mul) {
        // Multiplication.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        // There are three cases for multiplication: multiplication by
        // exactly the scale, multiplication by a constant different to the
        // scale, and multiplication by something else.
        Value *LHS = BO->getOperand(0);
        Value *RHS = BO->getOperand(1);

        if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
          // Multiplication by a constant.
          if (CI->getValue() == Scale) {
            // Multiplication by exactly the scale, replace the multiplication
            // by its left-hand side in the parent.
            Op = LHS;
            break;
          }

          // Otherwise drill down into the constant.
          if (!Op->hasOneUse())
            return nullptr;

          Parent = std::make_pair(BO, 1);
          continue;
        }

        // Multiplication by something else.  Drill down into the left-hand
        // side since that's where the reassociate pass puts the good stuff.
        if (!Op->hasOneUse())
          return nullptr;

        Parent = std::make_pair(BO, 0);
        continue;
      }

      if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
          isa<ConstantInt>(BO->getOperand(1))) {
        // Multiplication by a power of 2.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        Value *LHS = BO->getOperand(0);
        int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
          getLimitedValue(Scale.getBitWidth());
        // Op = LHS << Amt.

        if (Amt == logScale) {
          // Multiplication by exactly the scale, replace the multiplication
          // by its left-hand side in the parent.
          Op = LHS;
          break;
        }
        if (Amt < logScale || !Op->hasOneUse())
          return nullptr;

        // Multiplication by more than the scale.  Reduce the multiplying
        // amount by the scale in the parent.
        Parent = std::make_pair(BO, 1);
        Op = ConstantInt::get(BO->getType(), Amt - logScale);
        break;
      }
    }

    if (!Op->hasOneUse())
      return nullptr;

    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getOpcode() == Instruction::SExt) {
        // Op is sign-extended from a smaller type, descale in the smaller
        // type.
        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        APInt SmallScale = Scale.trunc(SmallSize);
        // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want
        // to descale Op as (sext Y) * Scale.  In order to have
        //    sext (Y * SmallScale) = (sext Y) * Scale
        // some conditions need to hold: SmallScale must sign-extend to Scale
        // and the multiplication Y * SmallScale should not overflow.
        if (SmallScale.sext(Scale.getBitWidth()) != Scale)
          // SmallScale does not sign-extend to Scale.
          return nullptr;
        assert(SmallScale.exactLogBase2() == logScale);
        // Require that Y * SmallScale must not overflow.
        RequireNoSignedWrap = true;

        // Drill down through the cast.
        Parent = std::make_pair(Cast, 0);
        Scale = SmallScale;
        continue;
      }

      if (Cast->getOpcode() == Instruction::Trunc) {
        // Op is truncated from a larger type, descale in the larger type.
        // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
        //    trunc (Y * sext Scale) = (trunc Y) * Scale
        // always holds.  However (trunc Y) * Scale may overflow even if
        // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
        // from this point up in the expression (see later).
        if (RequireNoSignedWrap)
          return nullptr;

        // Drill down through the cast.
        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        Parent = std::make_pair(Cast, 0);
        Scale = Scale.sext(LargeSize);
        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
          logScale = -1;
        assert(Scale.exactLogBase2() == logScale);
        continue;
      }
    }

    // Unsupported expression, bail out.
    return nullptr;
  }
  // If Op is zero then Val = Op * Scale.
  if (match(Op, m_Zero())) {
    NoSignedWrap = true;
    return Op;
  }

  // We know that we can successfully descale, so from here on we can safely
  // modify the IR.  Op holds the descaled version of the deepest term in the
  // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
  // not to overflow.

  if (!Parent.first)
    // The expression only had one term.
    return Op;

  // Rewrite the parent using the descaled version of its operand.
  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
  assert(Op != Parent.first->getOperand(Parent.second) &&
         "Descaling was a no-op?");
  Parent.first->setOperand(Parent.second, Op);
  Worklist.Add(Parent.first);

  // Now work back up the expression correcting nsw flags.  The logic is
  // based on the following observation: if X * Y is known not to overflow as
  // a signed multiplication, and Y is replaced by a value Z with smaller
  // absolute value, then X * Z will not overflow as a signed multiplication
  // either.  As we work our way up, having NoSignedWrap 'true' means that
  // the descaled value at the current level has strictly smaller absolute
  // value than the original.
  Instruction *Ancestor = Parent.first;
  do {
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
      // If the multiplication wasn't nsw then we can't say anything about
      // the value of the descaled multiplication, and we have to clear nsw
      // flags from this point on up.
      bool OpNoSignedWrap = BO->hasNoSignedWrap();
      NoSignedWrap &= OpNoSignedWrap;
      if (NoSignedWrap != OpNoSignedWrap) {
        BO->setHasNoSignedWrap(NoSignedWrap);
        Worklist.Add(Ancestor);
      }
    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
      // The fact that the descaled input to the trunc has smaller absolute
      // value than the original input doesn't tell us anything useful about
      // the absolute values of the truncations.
      NoSignedWrap = false;
    }
    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
           "Failed to keep proper track of nsw flags while drilling down?");

    if (Ancestor == Val)
      // Got to the top, all done!
      return Val;

    // Move up one level in the expression.
    assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
    Ancestor = Ancestor->user_back();
  } while (1);
}
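// For illustration (an editor's sketch, not part of the original file):
// descaling %v = mul i32 (mul i32 %y, 4), %x by Scale == 4 drills down the
// left-hand side to the constant factor 4, rewrites the inner multiply to
// just %y so that %v becomes mul i32 %y, %x, and then walks back up clearing
// any nsw flags that can no longer be justified.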
/// \brief Creates node of binary operation with the same attributes as the
/// specified one but with other operands.
static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
                                 InstCombiner::BuilderTy *B) {
  Value *BORes = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
  if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BORes)) {
    if (isa<OverflowingBinaryOperator>(NewBO)) {
      NewBO->setHasNoSignedWrap(Inst.hasNoSignedWrap());
      NewBO->setHasNoUnsignedWrap(Inst.hasNoUnsignedWrap());
    }
    if (isa<PossiblyExactOperator>(NewBO))
      NewBO->setIsExact(Inst.isExact());
  }
  return BORes;
}
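// For illustration (an editor's sketch, not part of the original file):
// given Inst = "add nsw nuw i32 %a, %b", CreateBinOpAsGiven(Inst, %x, %y, B)
// builds "add nsw nuw i32 %x, %y" -- same opcode and wrap/exact flags, new
// operands.  (If the builder constant-folds, the flags are simply dropped.)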
/// \brief Makes transformation of binary operation specific for vector types.
/// \param Inst Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
  if (!Inst.getType()->isVectorTy()) return nullptr;

  // It may not be safe to reorder shuffles and things like div, urem, etc.
  // because we may trap when executing those ops on unknown vector elements.
  // See PR20059.
  if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr;

  unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
  Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
  assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
  assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);

  // If both arguments of the binary operation are shuffles that use the same
  // mask and shuffle within a single vector, it is worthwhile to move the
  // shuffle after the binary operation:
  //   Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
  if (isa<ShuffleVectorInst>(LHS) && isa<ShuffleVectorInst>(RHS)) {
    ShuffleVectorInst *LShuf = cast<ShuffleVectorInst>(LHS);
    ShuffleVectorInst *RShuf = cast<ShuffleVectorInst>(RHS);
    if (isa<UndefValue>(LShuf->getOperand(1)) &&
        isa<UndefValue>(RShuf->getOperand(1)) &&
        LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType() &&
        LShuf->getMask() == RShuf->getMask()) {
      Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
                                        RShuf->getOperand(0), Builder);
      Value *Res = Builder->CreateShuffleVector(NewBO,
          UndefValue::get(NewBO->getType()), LShuf->getMask());
      return Res;
    }
  }

  // If one argument is a shuffle within one vector and the other is a
  // constant, try moving the shuffle after the binary operation.
  ShuffleVectorInst *Shuffle = nullptr;
  Constant *C1 = nullptr;
  if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
  if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
  if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
  if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
  if (Shuffle && C1 &&
      (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
      isa<UndefValue>(Shuffle->getOperand(1)) &&
      Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
    SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
    // Find a constant C2 with the property:
    //   shuffle(C2, ShMask) = C1
    // If no such constant exists (example: ShMask=<0,0> and C1=<1,2>), the
    // reorder is not possible.
    SmallVector<Constant*, 16>
        C2M(VWidth, UndefValue::get(C1->getType()->getScalarType()));
    bool MayChange = true;
    for (unsigned I = 0; I < VWidth; ++I) {
      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)VWidth);
        if (!isa<UndefValue>(C2M[ShMask[I]])) {
          MayChange = false;
          break;
        }
        C2M[ShMask[I]] = C1->getAggregateElement(I);
      }
    }
    if (MayChange) {
      Constant *C2 = ConstantVector::get(C2M);
      Value *NewLHS, *NewRHS;
      if (isa<Constant>(LHS)) {
        NewLHS = C2;
        NewRHS = Shuffle->getOperand(0);
      } else {
        NewLHS = Shuffle->getOperand(0);
        NewRHS = C2;
      }
      Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
      Value *Res = Builder->CreateShuffleVector(NewBO,
          UndefValue::get(Inst.getType()), Shuffle->getMask());
      return Res;
    }
  }

  return nullptr;
}
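// For illustration (an editor's sketch, not part of the original file):
//    %s = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
//    %r = add <2 x i32> %s, <i32 10, i32 20>
// is rewritten by building C2 = <i32 20, i32 10> (so that shuffle(C2, m)
// equals the original constant) and moving the shuffle after the add:
//    %t = add <2 x i32> %v, <i32 20, i32 10>
//    %r = shufflevector <2 x i32> %t, <2 x i32> undef, <2 x i32> <i32 1, i32 0>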
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, DL, TLI, DT, AT))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  if (DL) {
    bool MadeChange = false;
    Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is
      // equivalent to an index of zero, so replace it with zero if it is not
      // zero already.
      if (SeqTy->getElementType()->isSized() &&
          DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      Type *IndexTy = (*I)->getType();
      if (IndexTy != IntPtrTy) {
        // If we are using a wider index than needed for this platform,
        // shrink it to what we need.  If narrower, sign-extend it to what we
        // need.  This explicit cast can make subsequent optimizations more
        // obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Check to see if the inputs to the PHI node are getelementptr
  // instructions.
  if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
    GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
    if (!Op1)
      return nullptr;

    int DI = -1;

    for (auto I = PN->op_begin()+1, E = PN->op_end(); I != E; ++I) {
      GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
      if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
        return nullptr;

      // Keep track of the type as we walk the GEP.
      Type *CurTy = Op1->getOperand(0)->getType()->getScalarType();

      for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
        if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
          return nullptr;

        if (Op1->getOperand(J) != Op2->getOperand(J)) {
          if (DI == -1) {
            // We have not seen any differences yet in the GEPs feeding the
            // PHI, so we record this one if it is allowed to be a variable.

            // The first two arguments can vary for any GEP, the rest have to
            // be static for struct slots.
            if (J > 1 && CurTy->isStructTy())
              return nullptr;

            DI = J;
          } else {
            // The GEP is different by more than one input.  While this could
            // be extended to support GEPs that vary by more than one
            // variable it doesn't make sense since it greatly increases the
            // complexity and would result in an R+R+R addressing mode which
            // no backend directly supports and would need to be broken into
            // several simpler instructions anyway.
            return nullptr;
          }
        }

        // Sink down a layer of the type for the next iteration.
        if (J > 0) {
          if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
            CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
          } else {
            CurTy = nullptr;
          }
        }
      }
    }

    GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());

    if (DI == -1) {
      // All the GEPs feeding the PHI are identical.  Clone one down into our
      // BB so that it can be merged with the current GEP.
      GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
                                            NewGEP);
    } else {
      // All the GEPs feeding the PHI differ at a single offset.  Clone a GEP
      // into the current block so it can be merged, and create a new PHI to
      // set that index.
      Instruction *InsertPt = Builder->GetInsertPoint();
      Builder->SetInsertPoint(PN);
      PHINode *NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
                                          PN->getNumOperands());
      Builder->SetInsertPoint(InsertPt);

      for (auto &I : PN->operands())
        NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
                           PN->getIncomingBlock(I));

      NewGEP->setOperand(DI, NewPN);
      GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
                                            NewGEP);
    }

    GEP.setOperand(0, NewGEP);
    PtrOp = NewGEP;
  }
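  // For illustration (an editor's sketch, not part of the original file):
  // after the PHI handling above, a chain such as
  //    %g1 = getelementptr i32* %p, i64 %a
  //    %g2 = getelementptr i32* %g1, i64 %b
  // can be merged by the code below into
  //    %sum = add i64 %a, %b
  //    %g2  = getelementptr i32* %p, i64 %sum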
01465 bool EndsWithSequential = false; 01466 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 01467 I != E; ++I) 01468 EndsWithSequential = !(*I)->isStructTy(); 01469 01470 // Can we combine the two pointer arithmetics offsets? 01471 if (EndsWithSequential) { 01472 // Replace: gep (gep %P, long B), long A, ... 01473 // With: T = long A+B; gep %P, T, ... 01474 // 01475 Value *Sum; 01476 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 01477 Value *GO1 = GEP.getOperand(1); 01478 if (SO1 == Constant::getNullValue(SO1->getType())) { 01479 Sum = GO1; 01480 } else if (GO1 == Constant::getNullValue(GO1->getType())) { 01481 Sum = SO1; 01482 } else { 01483 // If they aren't the same type, then the input hasn't been processed 01484 // by the loop above yet (which canonicalizes sequential index types to 01485 // intptr_t). Just avoid transforming this until the input has been 01486 // normalized. 01487 if (SO1->getType() != GO1->getType()) 01488 return nullptr; 01489 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum"); 01490 } 01491 01492 // Update the GEP in place if possible. 01493 if (Src->getNumOperands() == 2) { 01494 GEP.setOperand(0, Src->getOperand(0)); 01495 GEP.setOperand(1, Sum); 01496 return &GEP; 01497 } 01498 Indices.append(Src->op_begin()+1, Src->op_end()-1); 01499 Indices.push_back(Sum); 01500 Indices.append(GEP.op_begin()+2, GEP.op_end()); 01501 } else if (isa<Constant>(*GEP.idx_begin()) && 01502 cast<Constant>(*GEP.idx_begin())->isNullValue() && 01503 Src->getNumOperands() != 1) { 01504 // Otherwise we can do the fold if the first index of the GEP is a zero 01505 Indices.append(Src->op_begin()+1, Src->op_end()); 01506 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 01507 } 01508 01509 if (!Indices.empty()) 01510 return (GEP.isInBounds() && Src->isInBounds()) ? 01511 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices, 01512 GEP.getName()) : 01513 GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName()); 01514 } 01515 01516 if (DL && GEP.getNumIndices() == 1) { 01517 unsigned AS = GEP.getPointerAddressSpace(); 01518 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == 01519 DL->getPointerSizeInBits(AS)) { 01520 Type *PtrTy = GEP.getPointerOperandType(); 01521 Type *Ty = PtrTy->getPointerElementType(); 01522 uint64_t TyAllocSize = DL->getTypeAllocSize(Ty); 01523 01524 bool Matched = false; 01525 uint64_t C; 01526 Value *V = nullptr; 01527 if (TyAllocSize == 1) { 01528 V = GEP.getOperand(1); 01529 Matched = true; 01530 } else if (match(GEP.getOperand(1), 01531 m_AShr(m_Value(V), m_ConstantInt(C)))) { 01532 if (TyAllocSize == 1ULL << C) 01533 Matched = true; 01534 } else if (match(GEP.getOperand(1), 01535 m_SDiv(m_Value(V), m_ConstantInt(C)))) { 01536 if (TyAllocSize == C) 01537 Matched = true; 01538 } 01539 01540 if (Matched) { 01541 // Canonicalize (gep i8* X, -(ptrtoint Y)) 01542 // to (inttoptr (sub (ptrtoint X), (ptrtoint Y))) 01543 // The GEP pattern is emitted by the SCEV expander for certain kinds of 01544 // pointer arithmetic. 
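        // For instance, a plausible input/output pair (an illustrative sketch;
        // %X and %Y are hypothetical i8* values, i64 pointer width assumed):
        //   %y.int = ptrtoint i8* %Y to i64
        //   %neg   = sub i64 0, %y.int
        //   %gep   = getelementptr i8* %X, i64 %neg
        // becomes roughly:
        //   %x.int = ptrtoint i8* %X to i64
        //   %diff  = sub i64 %x.int, %y.int
        //   %gep   = inttoptr i64 %diff to i8*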
01545 if (match(V, m_Neg(m_PtrToInt(m_Value())))) { 01546 Operator *Index = cast<Operator>(V); 01547 Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType()); 01548 Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1)); 01549 return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType()); 01550 } 01551 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) 01552 // to (bitcast Y) 01553 Value *Y; 01554 if (match(V, m_Sub(m_PtrToInt(m_Value(Y)), 01555 m_PtrToInt(m_Specific(GEP.getOperand(0)))))) { 01556 return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, 01557 GEP.getType()); 01558 } 01559 } 01560 } 01561 } 01562 01563 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0). 01564 Value *StrippedPtr = PtrOp->stripPointerCasts(); 01565 PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType()); 01566 01567 // We do not handle pointer-vector geps here. 01568 if (!StrippedPtrTy) 01569 return nullptr; 01570 01571 if (StrippedPtr != PtrOp) { 01572 bool HasZeroPointerIndex = false; 01573 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1))) 01574 HasZeroPointerIndex = C->isZero(); 01575 01576 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... 01577 // into : GEP [10 x i8]* X, i32 0, ... 01578 // 01579 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ... 01580 // into : GEP i8* X, ... 01581 // 01582 // This occurs when the program declares an array extern like "int X[];" 01583 if (HasZeroPointerIndex) { 01584 PointerType *CPTy = cast<PointerType>(PtrOp->getType()); 01585 if (ArrayType *CATy = 01586 dyn_cast<ArrayType>(CPTy->getElementType())) { 01587 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ? 01588 if (CATy->getElementType() == StrippedPtrTy->getElementType()) { 01589 // -> GEP i8* X, ... 01590 SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end()); 01591 GetElementPtrInst *Res = 01592 GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName()); 01593 Res->setIsInBounds(GEP.isInBounds()); 01594 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) 01595 return Res; 01596 // Insert Res, and create an addrspacecast. 01597 // e.g., 01598 // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ... 01599 // -> 01600 // %0 = GEP i8 addrspace(1)* X, ... 01601 // addrspacecast i8 addrspace(1)* %0 to i8* 01602 return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType()); 01603 } 01604 01605 if (ArrayType *XATy = 01606 dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){ 01607 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ? 01608 if (CATy->getElementType() == XATy->getElementType()) { 01609 // -> GEP [10 x i8]* X, i32 0, ... 01610 // At this point, we know that the cast source type is a pointer 01611 // to an array of the same type as the destination pointer 01612 // array. Because the array type is never stepped over (there 01613 // is a leading zero) we can fold the cast into this GEP. 01614 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) { 01615 GEP.setOperand(0, StrippedPtr); 01616 return &GEP; 01617 } 01618 // Cannot replace the base pointer directly because StrippedPtr's 01619 // address space is different. Instead, create a new GEP followed by 01620 // an addrspacecast. 01621 // e.g., 01622 // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*), 01623 // i32 0, ... 01624 // -> 01625 // %0 = GEP [10 x i8] addrspace(1)* X, ... 
01626 // addrspacecast i8 addrspace(1)* %0 to i8* 01627 SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end()); 01628 Value *NewGEP = GEP.isInBounds() ? 01629 Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) : 01630 Builder->CreateGEP(StrippedPtr, Idx, GEP.getName()); 01631 return new AddrSpaceCastInst(NewGEP, GEP.getType()); 01632 } 01633 } 01634 } 01635 } else if (GEP.getNumOperands() == 2) { 01636 // Transform things like: 01637 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V 01638 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast 01639 Type *SrcElTy = StrippedPtrTy->getElementType(); 01640 Type *ResElTy = PtrOp->getType()->getPointerElementType(); 01641 if (DL && SrcElTy->isArrayTy() && 01642 DL->getTypeAllocSize(SrcElTy->getArrayElementType()) == 01643 DL->getTypeAllocSize(ResElTy)) { 01644 Type *IdxType = DL->getIntPtrType(GEP.getType()); 01645 Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) }; 01646 Value *NewGEP = GEP.isInBounds() ? 01647 Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) : 01648 Builder->CreateGEP(StrippedPtr, Idx, GEP.getName()); 01649 01650 // V and GEP are both pointer types --> BitCast 01651 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 01652 GEP.getType()); 01653 } 01654 01655 // Transform things like: 01656 // %V = mul i64 %N, 4 01657 // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V 01658 // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast 01659 if (DL && ResElTy->isSized() && SrcElTy->isSized()) { 01660 // Check that changing the type amounts to dividing the index by a scale 01661 // factor. 01662 uint64_t ResSize = DL->getTypeAllocSize(ResElTy); 01663 uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy); 01664 if (ResSize && SrcSize % ResSize == 0) { 01665 Value *Idx = GEP.getOperand(1); 01666 unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); 01667 uint64_t Scale = SrcSize / ResSize; 01668 01669 // Earlier transforms ensure that the index has type IntPtrType, which 01670 // considerably simplifies the logic by eliminating implicit casts. 01671 assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) && 01672 "Index not cast to pointer width?"); 01673 01674 bool NSW; 01675 if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { 01676 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. 01677 // If the multiplication NewIdx * Scale may overflow then the new 01678 // GEP may not be "inbounds". 01679 Value *NewGEP = GEP.isInBounds() && NSW ? 01680 Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) : 01681 Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName()); 01682 01683 // The NewGEP must be pointer typed, so must the old one -> BitCast 01684 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 01685 GEP.getType()); 01686 } 01687 } 01688 } 01689 01690 // Similarly, transform things like: 01691 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp 01692 // (where tmp = 8*tmp2) into: 01693 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast 01694 if (DL && ResElTy->isSized() && SrcElTy->isSized() && 01695 SrcElTy->isArrayTy()) { 01696 // Check that changing to the array element type amounts to dividing the 01697 // index by a scale factor. 
01698 uint64_t ResSize = DL->getTypeAllocSize(ResElTy); 01699 uint64_t ArrayEltSize 01700 = DL->getTypeAllocSize(SrcElTy->getArrayElementType()); 01701 if (ResSize && ArrayEltSize % ResSize == 0) { 01702 Value *Idx = GEP.getOperand(1); 01703 unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); 01704 uint64_t Scale = ArrayEltSize / ResSize; 01705 01706 // Earlier transforms ensure that the index has type IntPtrType, which 01707 // considerably simplifies the logic by eliminating implicit casts. 01708 assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) && 01709 "Index not cast to pointer width?"); 01710 01711 bool NSW; 01712 if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { 01713 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. 01714 // If the multiplication NewIdx * Scale may overflow then the new 01715 // GEP may not be "inbounds". 01716 Value *Off[2] = { 01717 Constant::getNullValue(DL->getIntPtrType(GEP.getType())), 01718 NewIdx 01719 }; 01720 01721 Value *NewGEP = GEP.isInBounds() && NSW ? 01722 Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) : 01723 Builder->CreateGEP(StrippedPtr, Off, GEP.getName()); 01724 // The NewGEP must be pointer typed, so must the old one -> BitCast 01725 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 01726 GEP.getType()); 01727 } 01728 } 01729 } 01730 } 01731 } 01732 01733 if (!DL) 01734 return nullptr; 01735 01736 // addrspacecast between types is canonicalized as a bitcast, then an 01737 // addrspacecast. To take advantage of the below bitcast + struct GEP, look 01738 // through the addrspacecast. 01739 if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) { 01740 // X = bitcast A addrspace(1)* to B addrspace(1)* 01741 // Y = addrspacecast A addrspace(1)* to B addrspace(2)* 01742 // Z = gep Y, <...constant indices...> 01743 // Into an addrspacecasted GEP of the struct. 01744 if (BitCastInst *BC = dyn_cast<BitCastInst>(ASC->getOperand(0))) 01745 PtrOp = BC; 01746 } 01747 01748 /// See if we can simplify: 01749 /// X = bitcast A* to B* 01750 /// Y = gep X, <...constant indices...> 01751 /// into a gep of the original struct. This is important for SROA and alias 01752 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged. 01753 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) { 01754 Value *Operand = BCI->getOperand(0); 01755 PointerType *OpType = cast<PointerType>(Operand->getType()); 01756 unsigned OffsetBits = DL->getPointerTypeSizeInBits(GEP.getType()); 01757 APInt Offset(OffsetBits, 0); 01758 if (!isa<BitCastInst>(Operand) && 01759 GEP.accumulateConstantOffset(*DL, Offset)) { 01760 01761 // If this GEP instruction doesn't move the pointer, just replace the GEP 01762 // with a bitcast of the real input to the dest type. 01763 if (!Offset) { 01764 // If the bitcast is of an allocation, and the allocation will be 01765 // converted to match the type of the cast, don't touch this. 01766 if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) { 01767 // See if the bitcast simplifies, if so, don't nuke this GEP yet. 
01768 if (Instruction *I = visitBitCast(*BCI)) { 01769 if (I != BCI) { 01770 I->takeName(BCI); 01771 BCI->getParent()->getInstList().insert(BCI, I); 01772 ReplaceInstUsesWith(*BCI, I); 01773 } 01774 return &GEP; 01775 } 01776 } 01777 01778 if (Operand->getType()->getPointerAddressSpace() != GEP.getAddressSpace()) 01779 return new AddrSpaceCastInst(Operand, GEP.getType()); 01780 return new BitCastInst(Operand, GEP.getType()); 01781 } 01782 01783 // Otherwise, if the offset is non-zero, we need to find out if there is a 01784 // field at Offset in 'A's type. If so, we can pull the cast through the 01785 // GEP. 01786 SmallVector<Value*, 8> NewIndices; 01787 if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) { 01788 Value *NGEP = GEP.isInBounds() ? 01789 Builder->CreateInBoundsGEP(Operand, NewIndices) : 01790 Builder->CreateGEP(Operand, NewIndices); 01791 01792 if (NGEP->getType() == GEP.getType()) 01793 return ReplaceInstUsesWith(GEP, NGEP); 01794 NGEP->takeName(&GEP); 01795 01796 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace()) 01797 return new AddrSpaceCastInst(NGEP, GEP.getType()); 01798 return new BitCastInst(NGEP, GEP.getType()); 01799 } 01800 } 01801 } 01802 01803 return nullptr; 01804 } 01805 01806 static bool 01807 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users, 01808 const TargetLibraryInfo *TLI) { 01809 SmallVector<Instruction*, 4> Worklist; 01810 Worklist.push_back(AI); 01811 01812 do { 01813 Instruction *PI = Worklist.pop_back_val(); 01814 for (User *U : PI->users()) { 01815 Instruction *I = cast<Instruction>(U); 01816 switch (I->getOpcode()) { 01817 default: 01818 // Give up the moment we see something we can't handle. 01819 return false; 01820 01821 case Instruction::BitCast: 01822 case Instruction::GetElementPtr: 01823 Users.push_back(I); 01824 Worklist.push_back(I); 01825 continue; 01826 01827 case Instruction::ICmp: { 01828 ICmpInst *ICI = cast<ICmpInst>(I); 01829 // We can fold eq/ne comparisons with null to false/true, respectively. 01830 if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1))) 01831 return false; 01832 Users.push_back(I); 01833 continue; 01834 } 01835 01836 case Instruction::Call: 01837 // Ignore no-op and store intrinsics. 
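        // For instance (an illustrative sketch): a non-volatile memset whose
        // destination is the allocation itself does not make it observable,
        //   %m = call i8* @malloc(i64 16)
        //   call void @llvm.memset.p0i8.i64(i8* %m, i8 0, i64 16, i32 1, i1 false)
        // so the allocation can still be deleted if all other uses qualify.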
01838         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
01839           switch (II->getIntrinsicID()) {
01840           default:
01841             return false;
01842 
01843           case Intrinsic::memmove:
01844           case Intrinsic::memcpy:
01845           case Intrinsic::memset: {
01846             MemIntrinsic *MI = cast<MemIntrinsic>(II);
01847             if (MI->isVolatile() || MI->getRawDest() != PI)
01848               return false;
01849           }
01850           // fall through
01851           case Intrinsic::dbg_declare:
01852           case Intrinsic::dbg_value:
01853           case Intrinsic::invariant_start:
01854           case Intrinsic::invariant_end:
01855           case Intrinsic::lifetime_start:
01856           case Intrinsic::lifetime_end:
01857           case Intrinsic::objectsize:
01858             Users.push_back(I);
01859             continue;
01860           }
01861         }
01862 
01863         if (isFreeCall(I, TLI)) {
01864           Users.push_back(I);
01865           continue;
01866         }
01867         return false;
01868 
01869       case Instruction::Store: {
01870         StoreInst *SI = cast<StoreInst>(I);
01871         if (SI->isVolatile() || SI->getPointerOperand() != PI)
01872           return false;
01873         Users.push_back(I);
01874         continue;
01875       }
01876       }
01877       llvm_unreachable("missing a return?");
01878     }
01879   } while (!Worklist.empty());
01880   return true;
01881 }
01882 
01883 Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
01884   // If we have a malloc call that is used only in comparisons to null and in
01885   // free calls, delete the calls and replace the comparisons with true or
01886   // false as appropriate.
01887   SmallVector<WeakVH, 64> Users;
01888   if (isAllocSiteRemovable(&MI, Users, TLI)) {
01889     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
01890       Instruction *I = cast_or_null<Instruction>(&*Users[i]);
01891       if (!I) continue;
01892 
01893       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
01894         ReplaceInstUsesWith(*C,
01895                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
01896                                              C->isFalseWhenEqual()));
01897       } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
01898         ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
01899       } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
01900         if (II->getIntrinsicID() == Intrinsic::objectsize) {
01901           ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
01902           uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
01903           ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
01904         }
01905       }
01906       EraseInstFromFunction(*I);
01907     }
01908 
01909     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
01910       // Replace the invoke with a NOP intrinsic to maintain the original CFG.
01911       Module *M = II->getParent()->getParent()->getParent();
01912       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
01913       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
01914                          None, "", II->getParent());
01915     }
01916     return EraseInstFromFunction(MI);
01917   }
01918   return nullptr;
01919 }
01920 
01921 /// \brief Move the call to free before a NULL test.
01922 ///
01923 /// Check if this call to free is executed only after its argument has been
01924 /// tested against NULL (property 0).
01925 /// If so, it is legal to move this call into its predecessor block.
01926 ///
01927 /// The move is performed only if the block containing the call to free
01928 /// will be removed, i.e.:
01929 ///   1. it has only one predecessor P, and P has two successors
01930 ///   2. it contains the call and an unconditional branch
01931 ///   3. its successor is the same as its predecessor's successor
01932 ///
01933 /// Profitability is not a concern here; this function should be called only
01934 /// if the caller already knows the transformation would be profitable
01935 /// (e.g., when optimizing for code size).
01936 static Instruction *
01937 tryToMoveFreeBeforeNullTest(CallInst &FI) {
01938   Value *Op = FI.getArgOperand(0);
01939   BasicBlock *FreeInstrBB = FI.getParent();
01940   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
01941 
01942   // Validate part of constraint #1: only one predecessor.
01943   // FIXME: We could handle more predecessors, but then we would duplicate
01944   //        the call to free in each of them, which may not be profitable
01945   //        even for code size.
01946   if (!PredBB)
01947     return nullptr;
01948 
01949   // Validate constraint #2: Does this block contain only the call to
01950   //                         free and an unconditional branch?
01951   // FIXME: We could check whether we can speculate everything in the
01952   //        predecessor block.
01953   if (FreeInstrBB->size() != 2)
01954     return nullptr;
01955   BasicBlock *SuccBB;
01956   if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
01957     return nullptr;
01958 
01959   // Validate the rest of constraint #1 by matching on the pred branch.
01960   TerminatorInst *TI = PredBB->getTerminator();
01961   BasicBlock *TrueBB, *FalseBB;
01962   ICmpInst::Predicate Pred;
01963   if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
01964     return nullptr;
01965   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
01966     return nullptr;
01967 
01968   // Validate constraint #3: Ensure the null case just falls through.
01969   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
01970     return nullptr;
01971   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
01972          "Broken CFG: missing edge from predecessor to successor");
01973 
01974   FI.moveBefore(TI);
01975   return &FI;
01976 }
01977 
01978 
01979 Instruction *InstCombiner::visitFree(CallInst &FI) {
01980   Value *Op = FI.getArgOperand(0);
01981 
01982   // free undef -> unreachable.
01983   if (isa<UndefValue>(Op)) {
01984     // Insert a new store to null because we cannot modify the CFG here.
01985     Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
01986                          UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
01987     return EraseInstFromFunction(FI);
01988   }
01989 
01990   // If we have 'free null', delete the instruction. This can happen in STL
01991   // code when lots of inlining happens.
01992   if (isa<ConstantPointerNull>(Op))
01993     return EraseInstFromFunction(FI);
01994 
01995   // If we optimize for code size, try to move the call to free before the
01996   // null test so that SimplifyCFG can remove the empty block and dead code
01997   // elimination can remove the branch. I.e., this helps to turn:
01998   //   if (foo) free(foo);
01999   // into
02000   //   free(foo);
02001   if (MinimizeSize)
02002     if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
02003       return I;
02004 
02005   return nullptr;
02006 }
02007 
02008 Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
02009   if (RI.getNumOperands() == 0) // ret void
02010     return nullptr;
02011 
02012   Value *ResultOp = RI.getOperand(0);
02013   Type *VTy = ResultOp->getType();
02014   if (!VTy->isIntegerTy())
02015     return nullptr;
02016 
02017   // There might be assume intrinsics dominating this return that completely
02018   // determine the value. If so, constant fold it.
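  // For example (an illustrative sketch): if a dominating assume pins every
  // bit of the return value,
  //   %c = icmp eq i32 %x, 42
  //   call void @llvm.assume(i1 %c)
  //   ret i32 %x
  // then KnownZero|KnownOne covers all bits and this becomes 'ret i32 42'.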
02019 unsigned BitWidth = VTy->getPrimitiveSizeInBits(); 02020 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 02021 computeKnownBits(ResultOp, KnownZero, KnownOne, 0, &RI); 02022 if ((KnownZero|KnownOne).isAllOnesValue()) 02023 RI.setOperand(0, Constant::getIntegerValue(VTy, KnownOne)); 02024 02025 return nullptr; 02026 } 02027 02028 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { 02029 // Change br (not X), label True, label False to: br X, label False, True 02030 Value *X = nullptr; 02031 BasicBlock *TrueDest; 02032 BasicBlock *FalseDest; 02033 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) && 02034 !isa<Constant>(X)) { 02035 // Swap Destinations and condition... 02036 BI.setCondition(X); 02037 BI.swapSuccessors(); 02038 return &BI; 02039 } 02040 02041 // Canonicalize fcmp_one -> fcmp_oeq 02042 FCmpInst::Predicate FPred; Value *Y; 02043 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)), 02044 TrueDest, FalseDest)) && 02045 BI.getCondition()->hasOneUse()) 02046 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE || 02047 FPred == FCmpInst::FCMP_OGE) { 02048 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition()); 02049 Cond->setPredicate(FCmpInst::getInversePredicate(FPred)); 02050 02051 // Swap Destinations and condition. 02052 BI.swapSuccessors(); 02053 Worklist.Add(Cond); 02054 return &BI; 02055 } 02056 02057 // Canonicalize icmp_ne -> icmp_eq 02058 ICmpInst::Predicate IPred; 02059 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)), 02060 TrueDest, FalseDest)) && 02061 BI.getCondition()->hasOneUse()) 02062 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE || 02063 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE || 02064 IPred == ICmpInst::ICMP_SGE) { 02065 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition()); 02066 Cond->setPredicate(ICmpInst::getInversePredicate(IPred)); 02067 // Swap Destinations and condition. 02068 BI.swapSuccessors(); 02069 Worklist.Add(Cond); 02070 return &BI; 02071 } 02072 02073 return nullptr; 02074 } 02075 02076 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { 02077 Value *Cond = SI.getCondition(); 02078 if (Instruction *I = dyn_cast<Instruction>(Cond)) { 02079 if (I->getOpcode() == Instruction::Add) 02080 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) { 02081 // change 'switch (X+4) case 1:' into 'switch (X) case -3' 02082 // Skip the first item since that's the default case. 
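        // In IR terms, an illustrative sketch of the rewrite:
        //   %cond = add i32 %X, 4
        //   switch i32 %cond, label %dflt [ i32 1, label %bb ]
        // becomes:
        //   switch i32 %X, label %dflt [ i32 -3, label %bb ]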
02083 for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); 02084 i != e; ++i) { 02085 ConstantInt* CaseVal = i.getCaseValue(); 02086 Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal), 02087 AddRHS); 02088 assert(isa<ConstantInt>(NewCaseVal) && 02089 "Result of expression should be constant"); 02090 i.setValue(cast<ConstantInt>(NewCaseVal)); 02091 } 02092 SI.setCondition(I->getOperand(0)); 02093 Worklist.Add(I); 02094 return &SI; 02095 } 02096 } 02097 return nullptr; 02098 } 02099 02100 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { 02101 Value *Agg = EV.getAggregateOperand(); 02102 02103 if (!EV.hasIndices()) 02104 return ReplaceInstUsesWith(EV, Agg); 02105 02106 if (Constant *C = dyn_cast<Constant>(Agg)) { 02107 if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) { 02108 if (EV.getNumIndices() == 0) 02109 return ReplaceInstUsesWith(EV, C2); 02110 // Extract the remaining indices out of the constant indexed by the 02111 // first index 02112 return ExtractValueInst::Create(C2, EV.getIndices().slice(1)); 02113 } 02114 return nullptr; // Can't handle other constants 02115 } 02116 02117 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 02118 // We're extracting from an insertvalue instruction, compare the indices 02119 const unsigned *exti, *exte, *insi, *inse; 02120 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 02121 exte = EV.idx_end(), inse = IV->idx_end(); 02122 exti != exte && insi != inse; 02123 ++exti, ++insi) { 02124 if (*insi != *exti) 02125 // The insert and extract both reference distinctly different elements. 02126 // This means the extract is not influenced by the insert, and we can 02127 // replace the aggregate operand of the extract with the aggregate 02128 // operand of the insert. i.e., replace 02129 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 02130 // %E = extractvalue { i32, { i32 } } %I, 0 02131 // with 02132 // %E = extractvalue { i32, { i32 } } %A, 0 02133 return ExtractValueInst::Create(IV->getAggregateOperand(), 02134 EV.getIndices()); 02135 } 02136 if (exti == exte && insi == inse) 02137 // Both iterators are at the end: Index lists are identical. Replace 02138 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 02139 // %C = extractvalue { i32, { i32 } } %B, 1, 0 02140 // with "i32 42" 02141 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand()); 02142 if (exti == exte) { 02143 // The extract list is a prefix of the insert list. i.e. replace 02144 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 02145 // %E = extractvalue { i32, { i32 } } %I, 1 02146 // with 02147 // %X = extractvalue { i32, { i32 } } %A, 1 02148 // %E = insertvalue { i32 } %X, i32 42, 0 02149 // by switching the order of the insert and extract (though the 02150 // insertvalue should be left in, since it may have other uses). 02151 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(), 02152 EV.getIndices()); 02153 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 02154 makeArrayRef(insi, inse)); 02155 } 02156 if (insi == inse) 02157 // The insert list is a prefix of the extract list 02158 // We can simply remove the common indices from the extract and make it 02159 // operate on the inserted value instead of the insertvalue result. 
02160 // i.e., replace 02161 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 02162 // %E = extractvalue { i32, { i32 } } %I, 1, 0 02163 // with 02164 // %E extractvalue { i32 } { i32 42 }, 0 02165 return ExtractValueInst::Create(IV->getInsertedValueOperand(), 02166 makeArrayRef(exti, exte)); 02167 } 02168 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) { 02169 // We're extracting from an intrinsic, see if we're the only user, which 02170 // allows us to simplify multiple result intrinsics to simpler things that 02171 // just get one value. 02172 if (II->hasOneUse()) { 02173 // Check if we're grabbing the overflow bit or the result of a 'with 02174 // overflow' intrinsic. If it's the latter we can remove the intrinsic 02175 // and replace it with a traditional binary instruction. 02176 switch (II->getIntrinsicID()) { 02177 case Intrinsic::uadd_with_overflow: 02178 case Intrinsic::sadd_with_overflow: 02179 if (*EV.idx_begin() == 0) { // Normal result. 02180 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 02181 ReplaceInstUsesWith(*II, UndefValue::get(II->getType())); 02182 EraseInstFromFunction(*II); 02183 return BinaryOperator::CreateAdd(LHS, RHS); 02184 } 02185 02186 // If the normal result of the add is dead, and the RHS is a constant, 02187 // we can transform this into a range comparison. 02188 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3 02189 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow) 02190 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1))) 02191 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0), 02192 ConstantExpr::getNot(CI)); 02193 break; 02194 case Intrinsic::usub_with_overflow: 02195 case Intrinsic::ssub_with_overflow: 02196 if (*EV.idx_begin() == 0) { // Normal result. 02197 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 02198 ReplaceInstUsesWith(*II, UndefValue::get(II->getType())); 02199 EraseInstFromFunction(*II); 02200 return BinaryOperator::CreateSub(LHS, RHS); 02201 } 02202 break; 02203 case Intrinsic::umul_with_overflow: 02204 case Intrinsic::smul_with_overflow: 02205 if (*EV.idx_begin() == 0) { // Normal result. 02206 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 02207 ReplaceInstUsesWith(*II, UndefValue::get(II->getType())); 02208 EraseInstFromFunction(*II); 02209 return BinaryOperator::CreateMul(LHS, RHS); 02210 } 02211 break; 02212 default: 02213 break; 02214 } 02215 } 02216 } 02217 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) 02218 // If the (non-volatile) load only has one use, we can rewrite this to a 02219 // load from a GEP. This reduces the size of the load. 02220 // FIXME: If a load is used only by extractvalue instructions then this 02221 // could be done regardless of having multiple uses. 02222 if (L->isSimple() && L->hasOneUse()) { 02223 // extractvalue has integer indices, getelementptr has Value*s. Convert. 02224 SmallVector<Value*, 4> Indices; 02225 // Prefix an i32 0 since we need the first element. 02226 Indices.push_back(Builder->getInt32(0)); 02227 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end(); 02228 I != E; ++I) 02229 Indices.push_back(Builder->getInt32(*I)); 02230 02231 // We need to insert these at the location of the old load, not at that of 02232 // the extractvalue. 
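      // Illustrative sketch of the rewrite (hypothetical pointer %p):
      //   %agg = load { i32, i32 }* %p
      //   %v   = extractvalue { i32, i32 } %agg, 1
      // becomes, with the new instructions built at the load's position:
      //   %gep = getelementptr inbounds { i32, i32 }* %p, i32 0, i32 1
      //   %v   = load i32* %gep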
02233 Builder->SetInsertPoint(L->getParent(), L); 02234 Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices); 02235 // Returning the load directly will cause the main loop to insert it in 02236 // the wrong spot, so use ReplaceInstUsesWith(). 02237 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP)); 02238 } 02239 // We could simplify extracts from other values. Note that nested extracts may 02240 // already be simplified implicitly by the above: extract (extract (insert) ) 02241 // will be translated into extract ( insert ( extract ) ) first and then just 02242 // the value inserted, if appropriate. Similarly for extracts from single-use 02243 // loads: extract (extract (load)) will be translated to extract (load (gep)) 02244 // and if again single-use then via load (gep (gep)) to load (gep). 02245 // However, double extracts from e.g. function arguments or return values 02246 // aren't handled yet. 02247 return nullptr; 02248 } 02249 02250 enum Personality_Type { 02251 Unknown_Personality, 02252 GNU_Ada_Personality, 02253 GNU_CXX_Personality, 02254 GNU_ObjC_Personality 02255 }; 02256 02257 /// RecognizePersonality - See if the given exception handling personality 02258 /// function is one that we understand. If so, return a description of it; 02259 /// otherwise return Unknown_Personality. 02260 static Personality_Type RecognizePersonality(Value *Pers) { 02261 Function *F = dyn_cast<Function>(Pers->stripPointerCasts()); 02262 if (!F) 02263 return Unknown_Personality; 02264 return StringSwitch<Personality_Type>(F->getName()) 02265 .Case("__gnat_eh_personality", GNU_Ada_Personality) 02266 .Case("__gxx_personality_v0", GNU_CXX_Personality) 02267 .Case("__objc_personality_v0", GNU_ObjC_Personality) 02268 .Default(Unknown_Personality); 02269 } 02270 02271 /// isCatchAll - Return 'true' if the given typeinfo will match anything. 02272 static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) { 02273 switch (Personality) { 02274 case Unknown_Personality: 02275 return false; 02276 case GNU_Ada_Personality: 02277 // While __gnat_all_others_value will match any Ada exception, it doesn't 02278 // match foreign exceptions (or didn't, before gcc-4.7). 02279 return false; 02280 case GNU_CXX_Personality: 02281 case GNU_ObjC_Personality: 02282 return TypeInfo->isNullValue(); 02283 } 02284 llvm_unreachable("Unknown personality!"); 02285 } 02286 02287 static bool shorter_filter(const Value *LHS, const Value *RHS) { 02288 return 02289 cast<ArrayType>(LHS->getType())->getNumElements() 02290 < 02291 cast<ArrayType>(RHS->getType())->getNumElements(); 02292 } 02293 02294 Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { 02295 // The logic here should be correct for any real-world personality function. 02296 // However if that turns out not to be true, the offending logic can always 02297 // be conditioned on the personality function, like the catch-all logic is. 02298 Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn()); 02299 02300 // Simplify the list of clauses, eg by removing repeated catch clauses 02301 // (these are often created by inlining). 02302 bool MakeNewInstruction = false; // If true, recreate using the following: 02303 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction; 02304 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup. 02305 02306 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already. 
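  // For example (an illustrative sketch; the typeinfo is hypothetical):
  // inlining can leave duplicate catches,
  //   landingpad { i8*, i32 } personality i8* bitcast (...)
  //     catch i8* bitcast (i8** @_ZTIi to i8*)
  //     catch i8* bitcast (i8** @_ZTIi to i8*)
  // and the second, unreachable copy is dropped by the loop below.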
02307 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) { 02308 bool isLastClause = i + 1 == e; 02309 if (LI.isCatch(i)) { 02310 // A catch clause. 02311 Constant *CatchClause = LI.getClause(i); 02312 Constant *TypeInfo = CatchClause->stripPointerCasts(); 02313 02314 // If we already saw this clause, there is no point in having a second 02315 // copy of it. 02316 if (AlreadyCaught.insert(TypeInfo)) { 02317 // This catch clause was not already seen. 02318 NewClauses.push_back(CatchClause); 02319 } else { 02320 // Repeated catch clause - drop the redundant copy. 02321 MakeNewInstruction = true; 02322 } 02323 02324 // If this is a catch-all then there is no point in keeping any following 02325 // clauses or marking the landingpad as having a cleanup. 02326 if (isCatchAll(Personality, TypeInfo)) { 02327 if (!isLastClause) 02328 MakeNewInstruction = true; 02329 CleanupFlag = false; 02330 break; 02331 } 02332 } else { 02333 // A filter clause. If any of the filter elements were already caught 02334 // then they can be dropped from the filter. It is tempting to try to 02335 // exploit the filter further by saying that any typeinfo that does not 02336 // occur in the filter can't be caught later (and thus can be dropped). 02337 // However this would be wrong, since typeinfos can match without being 02338 // equal (for example if one represents a C++ class, and the other some 02339 // class derived from it). 02340 assert(LI.isFilter(i) && "Unsupported landingpad clause!"); 02341 Constant *FilterClause = LI.getClause(i); 02342 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType()); 02343 unsigned NumTypeInfos = FilterType->getNumElements(); 02344 02345 // An empty filter catches everything, so there is no point in keeping any 02346 // following clauses or marking the landingpad as having a cleanup. By 02347 // dealing with this case here the following code is made a bit simpler. 02348 if (!NumTypeInfos) { 02349 NewClauses.push_back(FilterClause); 02350 if (!isLastClause) 02351 MakeNewInstruction = true; 02352 CleanupFlag = false; 02353 break; 02354 } 02355 02356 bool MakeNewFilter = false; // If true, make a new filter. 02357 SmallVector<Constant *, 16> NewFilterElts; // New elements. 02358 if (isa<ConstantAggregateZero>(FilterClause)) { 02359 // Not an empty filter - it contains at least one null typeinfo. 02360 assert(NumTypeInfos > 0 && "Should have handled empty filter already!"); 02361 Constant *TypeInfo = 02362 Constant::getNullValue(FilterType->getElementType()); 02363 // If this typeinfo is a catch-all then the filter can never match. 02364 if (isCatchAll(Personality, TypeInfo)) { 02365 // Throw the filter away. 02366 MakeNewInstruction = true; 02367 continue; 02368 } 02369 02370 // There is no point in having multiple copies of this typeinfo, so 02371 // discard all but the first copy if there is more than one. 02372 NewFilterElts.push_back(TypeInfo); 02373 if (NumTypeInfos > 1) 02374 MakeNewFilter = true; 02375 } else { 02376 ConstantArray *Filter = cast<ConstantArray>(FilterClause); 02377 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements. 02378 NewFilterElts.reserve(NumTypeInfos); 02379 02380 // Remove any filter elements that were already caught or that already 02381 // occurred in the filter. While there, see if any of the elements are 02382 // catch-alls. If so, the filter can be discarded. 
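        // For example (an illustrative sketch), with a C++ personality a null
        // element is a catch-all, so a clause like
        //   filter [2 x i8*] [i8* null, i8* bitcast (i8** @_ZTIi to i8*)]
        // can never match and the whole clause is dropped.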
02383 bool SawCatchAll = false; 02384 for (unsigned j = 0; j != NumTypeInfos; ++j) { 02385 Constant *Elt = Filter->getOperand(j); 02386 Constant *TypeInfo = Elt->stripPointerCasts(); 02387 if (isCatchAll(Personality, TypeInfo)) { 02388 // This element is a catch-all. Bail out, noting this fact. 02389 SawCatchAll = true; 02390 break; 02391 } 02392 if (AlreadyCaught.count(TypeInfo)) 02393 // Already caught by an earlier clause, so having it in the filter 02394 // is pointless. 02395 continue; 02396 // There is no point in having multiple copies of the same typeinfo in 02397 // a filter, so only add it if we didn't already. 02398 if (SeenInFilter.insert(TypeInfo)) 02399 NewFilterElts.push_back(cast<Constant>(Elt)); 02400 } 02401 // A filter containing a catch-all cannot match anything by definition. 02402 if (SawCatchAll) { 02403 // Throw the filter away. 02404 MakeNewInstruction = true; 02405 continue; 02406 } 02407 02408 // If we dropped something from the filter, make a new one. 02409 if (NewFilterElts.size() < NumTypeInfos) 02410 MakeNewFilter = true; 02411 } 02412 if (MakeNewFilter) { 02413 FilterType = ArrayType::get(FilterType->getElementType(), 02414 NewFilterElts.size()); 02415 FilterClause = ConstantArray::get(FilterType, NewFilterElts); 02416 MakeNewInstruction = true; 02417 } 02418 02419 NewClauses.push_back(FilterClause); 02420 02421 // If the new filter is empty then it will catch everything so there is 02422 // no point in keeping any following clauses or marking the landingpad 02423 // as having a cleanup. The case of the original filter being empty was 02424 // already handled above. 02425 if (MakeNewFilter && !NewFilterElts.size()) { 02426 assert(MakeNewInstruction && "New filter but not a new instruction!"); 02427 CleanupFlag = false; 02428 break; 02429 } 02430 } 02431 } 02432 02433 // If several filters occur in a row then reorder them so that the shortest 02434 // filters come first (those with the smallest number of elements). This is 02435 // advantageous because shorter filters are more likely to match, speeding up 02436 // unwinding, but mostly because it increases the effectiveness of the other 02437 // filter optimizations below. 02438 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) { 02439 unsigned j; 02440 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters. 02441 for (j = i; j != e; ++j) 02442 if (!isa<ArrayType>(NewClauses[j]->getType())) 02443 break; 02444 02445 // Check whether the filters are already sorted by length. We need to know 02446 // if sorting them is actually going to do anything so that we only make a 02447 // new landingpad instruction if it does. 02448 for (unsigned k = i; k + 1 < j; ++k) 02449 if (shorter_filter(NewClauses[k+1], NewClauses[k])) { 02450 // Not sorted, so sort the filters now. Doing an unstable sort would be 02451 // correct too but reordering filters pointlessly might confuse users. 02452 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j, 02453 shorter_filter); 02454 MakeNewInstruction = true; 02455 break; 02456 } 02457 02458 // Look for the next batch of filters. 02459 i = j + 1; 02460 } 02461 02462 // If typeinfos matched if and only if equal, then the elements of a filter L 02463 // that occurs later than a filter F could be replaced by the intersection of 02464 // the elements of F and L. 
In reality two typeinfos can match without being 02465 // equal (for example if one represents a C++ class, and the other some class 02466 // derived from it) so it would be wrong to perform this transform in general. 02467 // However the transform is correct and useful if F is a subset of L. In that 02468 // case L can be replaced by F, and thus removed altogether since repeating a 02469 // filter is pointless. So here we look at all pairs of filters F and L where 02470 // L follows F in the list of clauses, and remove L if every element of F is 02471 // an element of L. This can occur when inlining C++ functions with exception 02472 // specifications. 02473 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) { 02474 // Examine each filter in turn. 02475 Value *Filter = NewClauses[i]; 02476 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType()); 02477 if (!FTy) 02478 // Not a filter - skip it. 02479 continue; 02480 unsigned FElts = FTy->getNumElements(); 02481 // Examine each filter following this one. Doing this backwards means that 02482 // we don't have to worry about filters disappearing under us when removed. 02483 for (unsigned j = NewClauses.size() - 1; j != i; --j) { 02484 Value *LFilter = NewClauses[j]; 02485 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType()); 02486 if (!LTy) 02487 // Not a filter - skip it. 02488 continue; 02489 // If Filter is a subset of LFilter, i.e. every element of Filter is also 02490 // an element of LFilter, then discard LFilter. 02491 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j; 02492 // If Filter is empty then it is a subset of LFilter. 02493 if (!FElts) { 02494 // Discard LFilter. 02495 NewClauses.erase(J); 02496 MakeNewInstruction = true; 02497 // Move on to the next filter. 02498 continue; 02499 } 02500 unsigned LElts = LTy->getNumElements(); 02501 // If Filter is longer than LFilter then it cannot be a subset of it. 02502 if (FElts > LElts) 02503 // Move on to the next filter. 02504 continue; 02505 // At this point we know that LFilter has at least one element. 02506 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros. 02507 // Filter is a subset of LFilter iff Filter contains only zeros (as we 02508 // already know that Filter is not longer than LFilter). 02509 if (isa<ConstantAggregateZero>(Filter)) { 02510 assert(FElts <= LElts && "Should have handled this case earlier!"); 02511 // Discard LFilter. 02512 NewClauses.erase(J); 02513 MakeNewInstruction = true; 02514 } 02515 // Move on to the next filter. 02516 continue; 02517 } 02518 ConstantArray *LArray = cast<ConstantArray>(LFilter); 02519 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros. 02520 // Since Filter is non-empty and contains only zeros, it is a subset of 02521 // LFilter iff LFilter contains a zero. 02522 assert(FElts > 0 && "Should have eliminated the empty filter earlier!"); 02523 for (unsigned l = 0; l != LElts; ++l) 02524 if (LArray->getOperand(l)->isNullValue()) { 02525 // LFilter contains a zero - discard it. 02526 NewClauses.erase(J); 02527 MakeNewInstruction = true; 02528 break; 02529 } 02530 // Move on to the next filter. 02531 continue; 02532 } 02533 // At this point we know that both filters are ConstantArrays. Loop over 02534 // operands to see whether every element of Filter is also an element of 02535 // LFilter. Since filters tend to be short this is probably faster than 02536 // using a method that scales nicely. 
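      // Illustrative sketch (hypothetical typeinfos @A and @B): given
      //   F = filter [1 x i8*] [i8* @A]            ; earlier clause
      //   L = filter [2 x i8*] [i8* @A, i8* @B]    ; later clause
      // every element of F occurs in L, so L is redundant and is erased.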
02537 ConstantArray *FArray = cast<ConstantArray>(Filter); 02538 bool AllFound = true; 02539 for (unsigned f = 0; f != FElts; ++f) { 02540 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts(); 02541 AllFound = false; 02542 for (unsigned l = 0; l != LElts; ++l) { 02543 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts(); 02544 if (LTypeInfo == FTypeInfo) { 02545 AllFound = true; 02546 break; 02547 } 02548 } 02549 if (!AllFound) 02550 break; 02551 } 02552 if (AllFound) { 02553 // Discard LFilter. 02554 NewClauses.erase(J); 02555 MakeNewInstruction = true; 02556 } 02557 // Move on to the next filter. 02558 } 02559 } 02560 02561 // If we changed any of the clauses, replace the old landingpad instruction 02562 // with a new one. 02563 if (MakeNewInstruction) { 02564 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(), 02565 LI.getPersonalityFn(), 02566 NewClauses.size()); 02567 for (unsigned i = 0, e = NewClauses.size(); i != e; ++i) 02568 NLI->addClause(NewClauses[i]); 02569 // A landing pad with no clauses must have the cleanup flag set. It is 02570 // theoretically possible, though highly unlikely, that we eliminated all 02571 // clauses. If so, force the cleanup flag to true. 02572 if (NewClauses.empty()) 02573 CleanupFlag = true; 02574 NLI->setCleanup(CleanupFlag); 02575 return NLI; 02576 } 02577 02578 // Even if none of the clauses changed, we may nonetheless have understood 02579 // that the cleanup flag is pointless. Clear it if so. 02580 if (LI.isCleanup() != CleanupFlag) { 02581 assert(!CleanupFlag && "Adding a cleanup, not removing one?!"); 02582 LI.setCleanup(CleanupFlag); 02583 return &LI; 02584 } 02585 02586 return nullptr; 02587 } 02588 02589 02590 02591 02592 /// TryToSinkInstruction - Try to move the specified instruction from its 02593 /// current block into the beginning of DestBlock, which can only happen if it's 02594 /// safe to move the instruction past all of the instructions between it and the 02595 /// end of its block. 02596 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { 02597 assert(I->hasOneUse() && "Invariants didn't hold!"); 02598 02599 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 02600 if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() || 02601 isa<TerminatorInst>(I)) 02602 return false; 02603 02604 // Do not sink alloca instructions out of the entry block. 02605 if (isa<AllocaInst>(I) && I->getParent() == 02606 &DestBlock->getParent()->getEntryBlock()) 02607 return false; 02608 02609 // We can only sink load instructions if there is nothing between the load and 02610 // the end of block that could change the value. 02611 if (I->mayReadFromMemory()) { 02612 for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); 02613 Scan != E; ++Scan) 02614 if (Scan->mayWriteToMemory()) 02615 return false; 02616 } 02617 02618 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt(); 02619 I->moveBefore(InsertPos); 02620 ++NumSunkInst; 02621 return true; 02622 } 02623 02624 02625 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding 02626 /// all reachable code to the worklist. 02627 /// 02628 /// This has a couple of tricks to make the code faster and more powerful. In 02629 /// particular, we constant fold and DCE instructions as we go, to avoid adding 02630 /// them to the worklist (this significantly speeds up instcombine on code where 02631 /// many instructions are dead or constant). 
Additionally, if we find a branch 02632 /// whose condition is a known constant, we only visit the reachable successors. 02633 /// 02634 static bool AddReachableCodeToWorklist(BasicBlock *BB, 02635 SmallPtrSetImpl<BasicBlock*> &Visited, 02636 InstCombiner &IC, 02637 const DataLayout *DL, 02638 const TargetLibraryInfo *TLI) { 02639 bool MadeIRChange = false; 02640 SmallVector<BasicBlock*, 256> Worklist; 02641 Worklist.push_back(BB); 02642 02643 SmallVector<Instruction*, 128> InstrsForInstCombineWorklist; 02644 DenseMap<ConstantExpr*, Constant*> FoldedConstants; 02645 02646 do { 02647 BB = Worklist.pop_back_val(); 02648 02649 // We have now visited this block! If we've already been here, ignore it. 02650 if (!Visited.insert(BB)) continue; 02651 02652 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 02653 Instruction *Inst = BBI++; 02654 02655 // DCE instruction if trivially dead. 02656 if (isInstructionTriviallyDead(Inst, TLI)) { 02657 ++NumDeadInst; 02658 DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); 02659 Inst->eraseFromParent(); 02660 continue; 02661 } 02662 02663 // ConstantProp instruction if trivially constant. 02664 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0))) 02665 if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) { 02666 DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " 02667 << *Inst << '\n'); 02668 Inst->replaceAllUsesWith(C); 02669 ++NumConstProp; 02670 Inst->eraseFromParent(); 02671 continue; 02672 } 02673 02674 if (DL) { 02675 // See if we can constant fold its operands. 02676 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); 02677 i != e; ++i) { 02678 ConstantExpr *CE = dyn_cast<ConstantExpr>(i); 02679 if (CE == nullptr) continue; 02680 02681 Constant*& FoldRes = FoldedConstants[CE]; 02682 if (!FoldRes) 02683 FoldRes = ConstantFoldConstantExpression(CE, DL, TLI); 02684 if (!FoldRes) 02685 FoldRes = CE; 02686 02687 if (FoldRes != CE) { 02688 *i = FoldRes; 02689 MadeIRChange = true; 02690 } 02691 } 02692 } 02693 02694 InstrsForInstCombineWorklist.push_back(Inst); 02695 } 02696 02697 // Recursively visit successors. If this is a branch or switch on a 02698 // constant, only visit the reachable successor. 02699 TerminatorInst *TI = BB->getTerminator(); 02700 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 02701 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 02702 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 02703 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 02704 Worklist.push_back(ReachableBB); 02705 continue; 02706 } 02707 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 02708 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 02709 // See if this is an explicit destination. 02710 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); 02711 i != e; ++i) 02712 if (i.getCaseValue() == Cond) { 02713 BasicBlock *ReachableBB = i.getCaseSuccessor(); 02714 Worklist.push_back(ReachableBB); 02715 continue; 02716 } 02717 02718 // Otherwise it is the default destination. 02719 Worklist.push_back(SI->getDefaultDest()); 02720 continue; 02721 } 02722 } 02723 02724 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) 02725 Worklist.push_back(TI->getSuccessor(i)); 02726 } while (!Worklist.empty()); 02727 02728 // Once we've found all of the instructions to add to instcombine's worklist, 02729 // add them in reverse order. This way instcombine will visit from the top 02730 // of the function down. 
This jives well with the way that it adds all uses 02731 // of instructions to the worklist after doing a transformation, thus avoiding 02732 // some N^2 behavior in pathological cases. 02733 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0], 02734 InstrsForInstCombineWorklist.size()); 02735 02736 return MadeIRChange; 02737 } 02738 02739 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { 02740 MadeIRChange = false; 02741 02742 DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " 02743 << F.getName() << "\n"); 02744 02745 { 02746 // Do a depth-first traversal of the function, populate the worklist with 02747 // the reachable instructions. Ignore blocks that are not reachable. Keep 02748 // track of which blocks we visit. 02749 SmallPtrSet<BasicBlock*, 64> Visited; 02750 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL, 02751 TLI); 02752 02753 // Do a quick scan over the function. If we find any blocks that are 02754 // unreachable, remove any instructions inside of them. This prevents 02755 // the instcombine code from having to deal with some bad special cases. 02756 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { 02757 if (Visited.count(BB)) continue; 02758 02759 // Delete the instructions backwards, as it has a reduced likelihood of 02760 // having to update as many def-use and use-def chains. 02761 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. 02762 while (EndInst != BB->begin()) { 02763 // Delete the next to last instruction. 02764 BasicBlock::iterator I = EndInst; 02765 Instruction *Inst = --I; 02766 if (!Inst->use_empty()) 02767 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); 02768 if (isa<LandingPadInst>(Inst)) { 02769 EndInst = Inst; 02770 continue; 02771 } 02772 if (!isa<DbgInfoIntrinsic>(Inst)) { 02773 ++NumDeadInst; 02774 MadeIRChange = true; 02775 } 02776 Inst->eraseFromParent(); 02777 } 02778 } 02779 } 02780 02781 while (!Worklist.isEmpty()) { 02782 Instruction *I = Worklist.RemoveOne(); 02783 if (I == nullptr) continue; // skip null values. 02784 02785 // Check to see if we can DCE the instruction. 02786 if (isInstructionTriviallyDead(I, TLI)) { 02787 DEBUG(dbgs() << "IC: DCE: " << *I << '\n'); 02788 EraseInstFromFunction(*I); 02789 ++NumDeadInst; 02790 MadeIRChange = true; 02791 continue; 02792 } 02793 02794 // Instruction isn't dead, see if we can constant propagate it. 02795 if (!I->use_empty() && isa<Constant>(I->getOperand(0))) 02796 if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) { 02797 DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n'); 02798 02799 // Add operands to the worklist. 02800 ReplaceInstUsesWith(*I, C); 02801 ++NumConstProp; 02802 EraseInstFromFunction(*I); 02803 MadeIRChange = true; 02804 continue; 02805 } 02806 02807 // See if we can trivially sink this instruction to a successor basic block. 02808 if (I->hasOneUse()) { 02809 BasicBlock *BB = I->getParent(); 02810 Instruction *UserInst = cast<Instruction>(*I->user_begin()); 02811 BasicBlock *UserParent; 02812 02813 // Get the block the use occurs in. 02814 if (PHINode *PN = dyn_cast<PHINode>(UserInst)) 02815 UserParent = PN->getIncomingBlock(*I->use_begin()); 02816 else 02817 UserParent = UserInst->getParent(); 02818 02819 if (UserParent != BB) { 02820 bool UserIsSuccessor = false; 02821 // See if the user is one of our successors. 
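        // Illustrative sketch of the CFG shape we are looking for
        // (hypothetical IR):
        //   BB:      %v = add i32 %a, %b        ; single use is in UserBB
        //            br i1 %c, label %UserBB, label %Other
        //   UserBB:  ...                        ; sole predecessor is BB
        // Here %v may be sunk into UserBB by TryToSinkInstruction below.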
02822         for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
02823           if (*SI == UserParent) {
02824             UserIsSuccessor = true;
02825             break;
02826           }
02827 
02828         // If the user is one of our immediate successors, and if that
02829         // successor only has us as a predecessor (we'd have to split the
02830         // critical edge otherwise), we can keep going.
02831         if (UserIsSuccessor && UserParent->getSinglePredecessor()) {
02832           // Okay, the CFG is simple enough, try to sink this instruction.
02833           if (TryToSinkInstruction(I, UserParent)) {
02834             MadeIRChange = true;
02835             // We'll add uses of the sunk instruction below, but since sinking
02836             // can expose opportunities for its *operands*, add those operands
02837             // to the worklist as well.
02838             for (Use &U : I->operands())
02839               if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
02840                 Worklist.Add(OpI);
02841           }
02842         }
02843       }
02844     }
02845 
02846     // Now that we have an instruction, try combining it to simplify it.
02847     Builder->SetInsertPoint(I->getParent(), I);
02848     Builder->SetCurrentDebugLocation(I->getDebugLoc());
02849 
02850 #ifndef NDEBUG
02851     std::string OrigI;
02852 #endif
02853     DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
02854     DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
02855 
02856     if (Instruction *Result = visit(*I)) {
02857       ++NumCombined;
02858       // Should we replace the old instruction with a new one?
02859       if (Result != I) {
02860         DEBUG(dbgs() << "IC: Old = " << *I << '\n'
02861                      << "    New = " << *Result << '\n');
02862 
02863         if (!I->getDebugLoc().isUnknown())
02864           Result->setDebugLoc(I->getDebugLoc());
02865         // Everything uses the new instruction now.
02866         I->replaceAllUsesWith(Result);
02867 
02868         // Move the name to the new instruction first.
02869         Result->takeName(I);
02870 
02871         // Push the new instruction and any users onto the worklist.
02872         Worklist.Add(Result);
02873         Worklist.AddUsersToWorkList(*Result);
02874 
02875         // Insert the new instruction into the basic block...
02876         BasicBlock *InstParent = I->getParent();
02877         BasicBlock::iterator InsertPos = I;
02878 
02879         // If we replace a PHI with something that isn't a PHI, fix up the
02880         // insertion point.
02881         if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
02882           InsertPos = InstParent->getFirstInsertionPt();
02883 
02884         InstParent->getInstList().insert(InsertPos, Result);
02885 
02886         EraseInstFromFunction(*I);
02887       } else {
02888 #ifndef NDEBUG
02889         DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
02890                      << "    New = " << *I << '\n');
02891 #endif
02892 
02893         // If the instruction was modified, it's possible that it is now dead.
02894         // If so, remove it.
02895         if (isInstructionTriviallyDead(I, TLI)) {
02896           EraseInstFromFunction(*I);
02897         } else {
02898           Worklist.Add(I);
02899           Worklist.AddUsersToWorkList(*I);
02900         }
02901       }
02902       MadeIRChange = true;
02903     }
02904   }
02905 
02906   Worklist.Zap();
02907   return MadeIRChange;
02908 }
02909 
02910 namespace {
02911 class InstCombinerLibCallSimplifier final : public LibCallSimplifier {
02912   InstCombiner *IC;
02913 public:
02914   InstCombinerLibCallSimplifier(const DataLayout *DL,
02915                                 const TargetLibraryInfo *TLI,
02916                                 InstCombiner *IC)
02917     : LibCallSimplifier(DL, TLI, EnableUnsafeFPShrink) {
02918     this->IC = IC;
02919   }
02920 
02921   /// replaceAllUsesWith - override so that instruction replacement
02922   /// can be defined in terms of the instruction combiner framework.
02923   void replaceAllUsesWith(Instruction *I, Value *With) const override {
02924     IC->ReplaceInstUsesWith(*I, With);
02925   }
02926 };
02927 }
02928 
02929 bool InstCombiner::runOnFunction(Function &F) {
02930   if (skipOptnoneFunction(F))
02931     return false;
02932 
02933   AT = &getAnalysis<AssumptionTracker>();
02934   DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
02935   DL = DLP ? &DLP->getDataLayout() : nullptr;
02936   TLI = &getAnalysis<TargetLibraryInfo>();
02937 
02938   DominatorTreeWrapperPass *DTWP =
02939       getAnalysisIfAvailable<DominatorTreeWrapperPass>();
02940   DT = DTWP ? &DTWP->getDomTree() : nullptr;
02941 
02942   // Minimizing size?
02943   MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
02944                                                 Attribute::MinSize);
02945 
02946   /// Builder - This is an IRBuilder that automatically adds new
02947   /// instructions to the worklist as they are created.
02948   IRBuilder<true, TargetFolder, InstCombineIRInserter>
02949     TheBuilder(F.getContext(), TargetFolder(DL),
02950                InstCombineIRInserter(Worklist, AT));
02951   Builder = &TheBuilder;
02952 
02953   InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
02954   Simplifier = &TheSimplifier;
02955 
02956   bool EverMadeChange = false;
02957 
02958   // Lower dbg.declare intrinsics; otherwise their values may be clobbered
02959   // by the instcombiner.
02960   EverMadeChange = LowerDbgDeclare(F);
02961 
02962   // Iterate while there is work to do.
02963   unsigned Iteration = 0;
02964   while (DoOneIteration(F, Iteration++))
02965     EverMadeChange = true;
02966 
02967   Builder = nullptr;
02968   return EverMadeChange;
02969 }
02970 
02971 FunctionPass *llvm::createInstructionCombiningPass() {
02972   return new InstCombiner();
02973 }
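// A typical way to exercise this pass in isolation (illustrative; file names
// are placeholders):
//   opt -instcombine -S input.ll -o output.ll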