LLVM API Documentation
00001 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===// 00002 // 00003 // The LLVM Compiler Infrastructure 00004 // 00005 // This file is distributed under the University of Illinois Open Source 00006 // License. See LICENSE.TXT for details. 00007 // 00008 //===----------------------------------------------------------------------===// 00009 // 00010 // This file implements routines for folding instructions into simpler forms 00011 // that do not require creating new instructions. This does constant folding 00012 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either 00013 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value 00014 // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been 00015 // simplified: This is usually true and assuming it simplifies the logic (if 00016 // they have not been simplified then results are correct but maybe suboptimal). 00017 // 00018 //===----------------------------------------------------------------------===// 00019 00020 #include "llvm/Analysis/InstructionSimplify.h" 00021 #include "llvm/ADT/SetVector.h" 00022 #include "llvm/ADT/Statistic.h" 00023 #include "llvm/Analysis/ConstantFolding.h" 00024 #include "llvm/Analysis/MemoryBuiltins.h" 00025 #include "llvm/Analysis/ValueTracking.h" 00026 #include "llvm/IR/ConstantRange.h" 00027 #include "llvm/IR/DataLayout.h" 00028 #include "llvm/IR/Dominators.h" 00029 #include "llvm/IR/GetElementPtrTypeIterator.h" 00030 #include "llvm/IR/GlobalAlias.h" 00031 #include "llvm/IR/Operator.h" 00032 #include "llvm/IR/PatternMatch.h" 00033 #include "llvm/IR/ValueHandle.h" 00034 using namespace llvm; 00035 using namespace llvm::PatternMatch; 00036 00037 #define DEBUG_TYPE "instsimplify" 00038 00039 enum { RecursionLimit = 3 }; 00040 00041 STATISTIC(NumExpand, "Number of expansions"); 00042 STATISTIC(NumReassoc, "Number of reassociations"); 00043 00044 namespace { 00045 struct Query { 00046 const DataLayout *DL; 00047 const TargetLibraryInfo *TLI; 00048 const DominatorTree *DT; 00049 AssumptionTracker *AT; 00050 const Instruction *CxtI; 00051 00052 Query(const DataLayout *DL, const TargetLibraryInfo *tli, 00053 const DominatorTree *dt, AssumptionTracker *at = nullptr, 00054 const Instruction *cxti = nullptr) 00055 : DL(DL), TLI(tli), DT(dt), AT(at), CxtI(cxti) {} 00056 }; 00057 } // end anonymous namespace 00058 00059 static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned); 00060 static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &, 00061 unsigned); 00062 static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &, 00063 unsigned); 00064 static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned); 00065 static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned); 00066 static Value *SimplifyTruncInst(Value *, Type *, const Query &, unsigned); 00067 00068 /// getFalse - For a boolean type, or a vector of boolean type, return false, or 00069 /// a vector with every element false, as appropriate for the type. 00070 static Constant *getFalse(Type *Ty) { 00071 assert(Ty->getScalarType()->isIntegerTy(1) && 00072 "Expected i1 type or a vector of i1!"); 00073 return Constant::getNullValue(Ty); 00074 } 00075 00076 /// getTrue - For a boolean type, or a vector of boolean type, return true, or 00077 /// a vector with every element true, as appropriate for the type. 
00078 static Constant *getTrue(Type *Ty) { 00079 assert(Ty->getScalarType()->isIntegerTy(1) && 00080 "Expected i1 type or a vector of i1!"); 00081 return Constant::getAllOnesValue(Ty); 00082 } 00083 00084 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"? 00085 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, 00086 Value *RHS) { 00087 CmpInst *Cmp = dyn_cast<CmpInst>(V); 00088 if (!Cmp) 00089 return false; 00090 CmpInst::Predicate CPred = Cmp->getPredicate(); 00091 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1); 00092 if (CPred == Pred && CLHS == LHS && CRHS == RHS) 00093 return true; 00094 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS && 00095 CRHS == LHS; 00096 } 00097 00098 /// ValueDominatesPHI - Does the given value dominate the specified phi node? 00099 static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) { 00100 Instruction *I = dyn_cast<Instruction>(V); 00101 if (!I) 00102 // Arguments and constants dominate all instructions. 00103 return true; 00104 00105 // If we are processing instructions (and/or basic blocks) that have not been 00106 // fully added to a function, the parent nodes may still be null. Simply 00107 // return the conservative answer in these cases. 00108 if (!I->getParent() || !P->getParent() || !I->getParent()->getParent()) 00109 return false; 00110 00111 // If we have a DominatorTree then do a precise test. 00112 if (DT) { 00113 if (!DT->isReachableFromEntry(P->getParent())) 00114 return true; 00115 if (!DT->isReachableFromEntry(I->getParent())) 00116 return false; 00117 return DT->dominates(I, P); 00118 } 00119 00120 // Otherwise, if the instruction is in the entry block, and is not an invoke, 00121 // then it obviously dominates all phi nodes. 00122 if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() && 00123 !isa<InvokeInst>(I)) 00124 return true; 00125 00126 return false; 00127 } 00128 00129 /// ExpandBinOp - Simplify "A op (B op' C)" by distributing op over op', turning 00130 /// it into "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is 00131 /// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS. 00132 /// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)". 00133 /// Returns the simplified value, or null if no simplification was performed. 00134 static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS, 00135 unsigned OpcToExpand, const Query &Q, 00136 unsigned MaxRecurse) { 00137 Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand; 00138 // Recursion is always used, so bail out at once if we already hit the limit. 00139 if (!MaxRecurse--) 00140 return nullptr; 00141 00142 // Check whether the expression has the form "(A op' B) op C". 00143 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS)) 00144 if (Op0->getOpcode() == OpcodeToExpand) { 00145 // It does! Try turning it into "(A op C) op' (B op C)". 00146 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 00147 // Do "A op C" and "B op C" both simplify? 00148 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) 00149 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { 00150 // They do! Return "L op' R" if it simplifies or is already available. 00151 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS. 
00152 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand) 00153 && L == B && R == A)) { 00154 ++NumExpand; 00155 return LHS; 00156 } 00157 // Otherwise return "L op' R" if it simplifies. 00158 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) { 00159 ++NumExpand; 00160 return V; 00161 } 00162 } 00163 } 00164 00165 // Check whether the expression has the form "A op (B op' C)". 00166 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS)) 00167 if (Op1->getOpcode() == OpcodeToExpand) { 00168 // It does! Try turning it into "(A op B) op' (A op C)". 00169 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 00170 // Do "A op B" and "A op C" both simplify? 00171 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) 00172 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) { 00173 // They do! Return "L op' R" if it simplifies or is already available. 00174 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS. 00175 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand) 00176 && L == C && R == B)) { 00177 ++NumExpand; 00178 return RHS; 00179 } 00180 // Otherwise return "L op' R" if it simplifies. 00181 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) { 00182 ++NumExpand; 00183 return V; 00184 } 00185 } 00186 } 00187 00188 return nullptr; 00189 } 00190 00191 /// SimplifyAssociativeBinOp - Generic simplifications for associative binary 00192 /// operations. Returns the simpler value, or null if none was found. 00193 static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS, 00194 const Query &Q, unsigned MaxRecurse) { 00195 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc; 00196 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!"); 00197 00198 // Recursion is always used, so bail out at once if we already hit the limit. 00199 if (!MaxRecurse--) 00200 return nullptr; 00201 00202 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 00203 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 00204 00205 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely. 00206 if (Op0 && Op0->getOpcode() == Opcode) { 00207 Value *A = Op0->getOperand(0); 00208 Value *B = Op0->getOperand(1); 00209 Value *C = RHS; 00210 00211 // Does "B op C" simplify? 00212 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { 00213 // It does! Return "A op V" if it simplifies or is already available. 00214 // If V equals B then "A op V" is just the LHS. 00215 if (V == B) return LHS; 00216 // Otherwise return "A op V" if it simplifies. 00217 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) { 00218 ++NumReassoc; 00219 return W; 00220 } 00221 } 00222 } 00223 00224 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely. 00225 if (Op1 && Op1->getOpcode() == Opcode) { 00226 Value *A = LHS; 00227 Value *B = Op1->getOperand(0); 00228 Value *C = Op1->getOperand(1); 00229 00230 // Does "A op B" simplify? 00231 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) { 00232 // It does! Return "V op C" if it simplifies or is already available. 00233 // If V equals B then "V op C" is just the RHS. 00234 if (V == B) return RHS; 00235 // Otherwise return "V op C" if it simplifies. 00236 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) { 00237 ++NumReassoc; 00238 return W; 00239 } 00240 } 00241 } 00242 00243 // The remaining transforms require commutativity as well as associativity. 
00244 if (!Instruction::isCommutative(Opcode)) 00245 return nullptr; 00246 00247 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely. 00248 if (Op0 && Op0->getOpcode() == Opcode) { 00249 Value *A = Op0->getOperand(0); 00250 Value *B = Op0->getOperand(1); 00251 Value *C = RHS; 00252 00253 // Does "C op A" simplify? 00254 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { 00255 // It does! Return "V op B" if it simplifies or is already available. 00256 // If V equals A then "V op B" is just the LHS. 00257 if (V == A) return LHS; 00258 // Otherwise return "V op B" if it simplifies. 00259 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) { 00260 ++NumReassoc; 00261 return W; 00262 } 00263 } 00264 } 00265 00266 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely. 00267 if (Op1 && Op1->getOpcode() == Opcode) { 00268 Value *A = LHS; 00269 Value *B = Op1->getOperand(0); 00270 Value *C = Op1->getOperand(1); 00271 00272 // Does "C op A" simplify? 00273 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { 00274 // It does! Return "B op V" if it simplifies or is already available. 00275 // If V equals C then "B op V" is just the RHS. 00276 if (V == C) return RHS; 00277 // Otherwise return "B op V" if it simplifies. 00278 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) { 00279 ++NumReassoc; 00280 return W; 00281 } 00282 } 00283 } 00284 00285 return nullptr; 00286 } 00287 00288 /// ThreadBinOpOverSelect - In the case of a binary operation with a select 00289 /// instruction as an operand, try to simplify the binop by seeing whether 00290 /// evaluating it on both branches of the select results in the same value. 00291 /// Returns the common value if so, otherwise returns null. 00292 static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS, 00293 const Query &Q, unsigned MaxRecurse) { 00294 // Recursion is always used, so bail out at once if we already hit the limit. 00295 if (!MaxRecurse--) 00296 return nullptr; 00297 00298 SelectInst *SI; 00299 if (isa<SelectInst>(LHS)) { 00300 SI = cast<SelectInst>(LHS); 00301 } else { 00302 assert(isa<SelectInst>(RHS) && "No select instruction operand!"); 00303 SI = cast<SelectInst>(RHS); 00304 } 00305 00306 // Evaluate the BinOp on the true and false branches of the select. 00307 Value *TV; 00308 Value *FV; 00309 if (SI == LHS) { 00310 TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse); 00311 FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse); 00312 } else { 00313 TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse); 00314 FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse); 00315 } 00316 00317 // If they simplified to the same value, then return the common value. 00318 // If they both failed to simplify then return null. 00319 if (TV == FV) 00320 return TV; 00321 00322 // If one branch simplified to undef, return the other one. 00323 if (TV && isa<UndefValue>(TV)) 00324 return FV; 00325 if (FV && isa<UndefValue>(FV)) 00326 return TV; 00327 00328 // If applying the operation did not change the true and false select values, 00329 // then the result of the binop is the select itself. 00330 if (TV == SI->getTrueValue() && FV == SI->getFalseValue()) 00331 return SI; 00332 00333 // If one branch simplified and the other did not, and the simplified 00334 // value is equal to the unsimplified one, return the simplified value. 00335 // For example, select (cond, X, X & Z) & Z -> X & Z. 
00336 if ((FV && !TV) || (TV && !FV)) { 00337 // Check that the simplified value has the form "X op Y" where "op" is the 00338 // same as the original operation. 00339 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV); 00340 if (Simplified && Simplified->getOpcode() == Opcode) { 00341 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS". 00342 // We already know that "op" is the same as for the simplified value. See 00343 // if the operands match too. If so, return the simplified value. 00344 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue(); 00345 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS; 00346 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch; 00347 if (Simplified->getOperand(0) == UnsimplifiedLHS && 00348 Simplified->getOperand(1) == UnsimplifiedRHS) 00349 return Simplified; 00350 if (Simplified->isCommutative() && 00351 Simplified->getOperand(1) == UnsimplifiedLHS && 00352 Simplified->getOperand(0) == UnsimplifiedRHS) 00353 return Simplified; 00354 } 00355 } 00356 00357 return nullptr; 00358 } 00359 00360 /// ThreadCmpOverSelect - In the case of a comparison with a select instruction, 00361 /// try to simplify the comparison by seeing whether both branches of the select 00362 /// result in the same value. Returns the common value if so, otherwise returns 00363 /// null. 00364 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, 00365 Value *RHS, const Query &Q, 00366 unsigned MaxRecurse) { 00367 // Recursion is always used, so bail out at once if we already hit the limit. 00368 if (!MaxRecurse--) 00369 return nullptr; 00370 00371 // Make sure the select is on the LHS. 00372 if (!isa<SelectInst>(LHS)) { 00373 std::swap(LHS, RHS); 00374 Pred = CmpInst::getSwappedPredicate(Pred); 00375 } 00376 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!"); 00377 SelectInst *SI = cast<SelectInst>(LHS); 00378 Value *Cond = SI->getCondition(); 00379 Value *TV = SI->getTrueValue(); 00380 Value *FV = SI->getFalseValue(); 00381 00382 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it. 00383 // Does "cmp TV, RHS" simplify? 00384 Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse); 00385 if (TCmp == Cond) { 00386 // It not only simplified, it simplified to the select condition. Replace 00387 // it with 'true'. 00388 TCmp = getTrue(Cond->getType()); 00389 } else if (!TCmp) { 00390 // It didn't simplify. However if "cmp TV, RHS" is equal to the select 00391 // condition then we can replace it with 'true'. Otherwise give up. 00392 if (!isSameCompare(Cond, Pred, TV, RHS)) 00393 return nullptr; 00394 TCmp = getTrue(Cond->getType()); 00395 } 00396 00397 // Does "cmp FV, RHS" simplify? 00398 Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse); 00399 if (FCmp == Cond) { 00400 // It not only simplified, it simplified to the select condition. Replace 00401 // it with 'false'. 00402 FCmp = getFalse(Cond->getType()); 00403 } else if (!FCmp) { 00404 // It didn't simplify. However if "cmp FV, RHS" is equal to the select 00405 // condition then we can replace it with 'false'. Otherwise give up. 00406 if (!isSameCompare(Cond, Pred, FV, RHS)) 00407 return nullptr; 00408 FCmp = getFalse(Cond->getType()); 00409 } 00410 00411 // If both sides simplified to the same value, then use it as the result of 00412 // the original comparison. 
00413 if (TCmp == FCmp) 00414 return TCmp; 00415 00416 // The remaining cases only make sense if the select condition has the same 00417 // type as the result of the comparison, so bail out if this is not so. 00418 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy()) 00419 return nullptr; 00420 // If the false value simplified to false, then the result of the compare 00421 // is equal to "Cond && TCmp". This also catches the case when the false 00422 // value simplified to false and the true value to true, returning "Cond". 00423 if (match(FCmp, m_Zero())) 00424 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse)) 00425 return V; 00426 // If the true value simplified to true, then the result of the compare 00427 // is equal to "Cond || FCmp". 00428 if (match(TCmp, m_One())) 00429 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse)) 00430 return V; 00431 // Finally, if the false value simplified to true and the true value to 00432 // false, then the result of the compare is equal to "!Cond". 00433 if (match(FCmp, m_One()) && match(TCmp, m_Zero())) 00434 if (Value *V = 00435 SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()), 00436 Q, MaxRecurse)) 00437 return V; 00438 00439 return nullptr; 00440 } 00441 00442 /// ThreadBinOpOverPHI - In the case of a binary operation with an operand that 00443 /// is a PHI instruction, try to simplify the binop by seeing whether evaluating 00444 /// it on the incoming phi values yields the same result for every value. If so 00445 /// returns the common value, otherwise returns null. 00446 static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS, 00447 const Query &Q, unsigned MaxRecurse) { 00448 // Recursion is always used, so bail out at once if we already hit the limit. 00449 if (!MaxRecurse--) 00450 return nullptr; 00451 00452 PHINode *PI; 00453 if (isa<PHINode>(LHS)) { 00454 PI = cast<PHINode>(LHS); 00455 // Bail out if RHS and the phi may be mutually interdependent due to a loop. 00456 if (!ValueDominatesPHI(RHS, PI, Q.DT)) 00457 return nullptr; 00458 } else { 00459 assert(isa<PHINode>(RHS) && "No PHI instruction operand!"); 00460 PI = cast<PHINode>(RHS); 00461 // Bail out if LHS and the phi may be mutually interdependent due to a loop. 00462 if (!ValueDominatesPHI(LHS, PI, Q.DT)) 00463 return nullptr; 00464 } 00465 00466 // Evaluate the BinOp on the incoming phi values. 00467 Value *CommonValue = nullptr; 00468 for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) { 00469 Value *Incoming = PI->getIncomingValue(i); 00470 // If the incoming value is the phi node itself, it can safely be skipped. 00471 if (Incoming == PI) continue; 00472 Value *V = PI == LHS ? 00473 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) : 00474 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse); 00475 // If the operation failed to simplify, or simplified to a different value 00476 // to previously, then give up. 00477 if (!V || (CommonValue && V != CommonValue)) 00478 return nullptr; 00479 CommonValue = V; 00480 } 00481 00482 return CommonValue; 00483 } 00484 00485 /// ThreadCmpOverPHI - In the case of a comparison with a PHI instruction, try 00486 /// try to simplify the comparison by seeing whether comparing with all of the 00487 /// incoming phi values yields the same result every time. If so returns the 00488 /// common result, otherwise returns null. 
00489 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, 00490 const Query &Q, unsigned MaxRecurse) { 00491 // Recursion is always used, so bail out at once if we already hit the limit. 00492 if (!MaxRecurse--) 00493 return nullptr; 00494 00495 // Make sure the phi is on the LHS. 00496 if (!isa<PHINode>(LHS)) { 00497 std::swap(LHS, RHS); 00498 Pred = CmpInst::getSwappedPredicate(Pred); 00499 } 00500 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!"); 00501 PHINode *PI = cast<PHINode>(LHS); 00502 00503 // Bail out if RHS and the phi may be mutually interdependent due to a loop. 00504 if (!ValueDominatesPHI(RHS, PI, Q.DT)) 00505 return nullptr; 00506 00507 // Evaluate the BinOp on the incoming phi values. 00508 Value *CommonValue = nullptr; 00509 for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) { 00510 Value *Incoming = PI->getIncomingValue(i); 00511 // If the incoming value is the phi node itself, it can safely be skipped. 00512 if (Incoming == PI) continue; 00513 Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse); 00514 // If the operation failed to simplify, or simplified to a different value 00515 // to previously, then give up. 00516 if (!V || (CommonValue && V != CommonValue)) 00517 return nullptr; 00518 CommonValue = V; 00519 } 00520 00521 return CommonValue; 00522 } 00523 00524 /// SimplifyAddInst - Given operands for an Add, see if we can 00525 /// fold the result. If not, this returns null. 00526 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 00527 const Query &Q, unsigned MaxRecurse) { 00528 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 00529 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 00530 Constant *Ops[] = { CLHS, CRHS }; 00531 return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(), Ops, 00532 Q.DL, Q.TLI); 00533 } 00534 00535 // Canonicalize the constant to the RHS. 00536 std::swap(Op0, Op1); 00537 } 00538 00539 // X + undef -> undef 00540 if (match(Op1, m_Undef())) 00541 return Op1; 00542 00543 // X + 0 -> X 00544 if (match(Op1, m_Zero())) 00545 return Op0; 00546 00547 // X + (Y - X) -> Y 00548 // (Y - X) + X -> Y 00549 // Eg: X + -X -> 0 00550 Value *Y = nullptr; 00551 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) || 00552 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1)))) 00553 return Y; 00554 00555 // X + ~X -> -1 since ~X = -X-1 00556 if (match(Op0, m_Not(m_Specific(Op1))) || 00557 match(Op1, m_Not(m_Specific(Op0)))) 00558 return Constant::getAllOnesValue(Op0->getType()); 00559 00560 /// i1 add -> xor. 00561 if (MaxRecurse && Op0->getType()->isIntegerTy(1)) 00562 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) 00563 return V; 00564 00565 // Try some generic simplifications for associative operations. 00566 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, 00567 MaxRecurse)) 00568 return V; 00569 00570 // Threading Add over selects and phi nodes is pointless, so don't bother. 00571 // Threading over the select in "A + select(cond, B, C)" means evaluating 00572 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and 00573 // only if B and C are equal. If B and C are equal then (since we assume 00574 // that operands have already been simplified) "select(cond, B, C)" should 00575 // have been simplified to the common value of B and C already. Analysing 00576 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly 00577 // for threading over phi nodes. 
00578 00579 return nullptr; 00580 } 00581 00582 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 00583 const DataLayout *DL, const TargetLibraryInfo *TLI, 00584 const DominatorTree *DT, AssumptionTracker *AT, 00585 const Instruction *CxtI) { 00586 return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, 00587 Query (DL, TLI, DT, AT, CxtI), RecursionLimit); 00588 } 00589 00590 /// \brief Compute the base pointer and cumulative constant offsets for V. 00591 /// 00592 /// This strips all constant offsets off of V, leaving it the base pointer, and 00593 /// accumulates the total constant offset applied in the returned constant. It 00594 /// returns 0 if V is not a pointer, and returns the constant '0' if there are 00595 /// no constant offsets applied. 00596 /// 00597 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't 00598 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc. 00599 /// folding. 00600 static Constant *stripAndComputeConstantOffsets(const DataLayout *DL, 00601 Value *&V, 00602 bool AllowNonInbounds = false) { 00603 assert(V->getType()->getScalarType()->isPointerTy()); 00604 00605 // Without DataLayout, just be conservative for now. Theoretically, more could 00606 // be done in this case. 00607 if (!DL) 00608 return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0); 00609 00610 Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType(); 00611 APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth()); 00612 00613 // Even though we don't look through PHI nodes, we could be called on an 00614 // instruction in an unreachable block, which may be on a cycle. 00615 SmallPtrSet<Value *, 4> Visited; 00616 Visited.insert(V); 00617 do { 00618 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 00619 if ((!AllowNonInbounds && !GEP->isInBounds()) || 00620 !GEP->accumulateConstantOffset(*DL, Offset)) 00621 break; 00622 V = GEP->getPointerOperand(); 00623 } else if (Operator::getOpcode(V) == Instruction::BitCast) { 00624 V = cast<Operator>(V)->getOperand(0); 00625 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 00626 if (GA->mayBeOverridden()) 00627 break; 00628 V = GA->getAliasee(); 00629 } else { 00630 break; 00631 } 00632 assert(V->getType()->getScalarType()->isPointerTy() && 00633 "Unexpected operand type!"); 00634 } while (Visited.insert(V)); 00635 00636 Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset); 00637 if (V->getType()->isVectorTy()) 00638 return ConstantVector::getSplat(V->getType()->getVectorNumElements(), 00639 OffsetIntPtr); 00640 return OffsetIntPtr; 00641 } 00642 00643 /// \brief Compute the constant difference between two pointer values. 00644 /// If the difference is not a constant, returns zero. 00645 static Constant *computePointerDifference(const DataLayout *DL, 00646 Value *LHS, Value *RHS) { 00647 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS); 00648 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS); 00649 00650 // If LHS and RHS are not related via constant offsets to the same base 00651 // value, there is nothing we can do here. 00652 if (LHS != RHS) 00653 return nullptr; 00654 00655 // Otherwise, the difference of LHS - RHS can be computed as: 00656 // LHS - RHS 00657 // = (LHSOffset + Base) - (RHSOffset + Base) 00658 // = LHSOffset - RHSOffset 00659 return ConstantExpr::getSub(LHSOffset, RHSOffset); 00660 } 00661 00662 /// SimplifySubInst - Given operands for a Sub, see if we can 00663 /// fold the result. 
If not, this returns null. 00664 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 00665 const Query &Q, unsigned MaxRecurse) { 00666 if (Constant *CLHS = dyn_cast<Constant>(Op0)) 00667 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 00668 Constant *Ops[] = { CLHS, CRHS }; 00669 return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(), 00670 Ops, Q.DL, Q.TLI); 00671 } 00672 00673 // X - undef -> undef 00674 // undef - X -> undef 00675 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 00676 return UndefValue::get(Op0->getType()); 00677 00678 // X - 0 -> X 00679 if (match(Op1, m_Zero())) 00680 return Op0; 00681 00682 // X - X -> 0 00683 if (Op0 == Op1) 00684 return Constant::getNullValue(Op0->getType()); 00685 00686 // X - (0 - Y) -> X if the second sub is NUW. 00687 // If Y != 0, 0 - Y is a poison value. 00688 // If Y == 0, 0 - Y simplifies to 0. 00689 if (BinaryOperator::isNeg(Op1)) { 00690 if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) { 00691 assert(BO->getOpcode() == Instruction::Sub && 00692 "Expected a subtraction operator!"); 00693 if (BO->hasNoUnsignedWrap()) 00694 return Op0; 00695 } 00696 } 00697 00698 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies. 00699 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X 00700 Value *X = nullptr, *Y = nullptr, *Z = Op1; 00701 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z 00702 // See if "V === Y - Z" simplifies. 00703 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1)) 00704 // It does! Now see if "X + V" simplifies. 00705 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) { 00706 // It does, we successfully reassociated! 00707 ++NumReassoc; 00708 return W; 00709 } 00710 // See if "V === X - Z" simplifies. 00711 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) 00712 // It does! Now see if "Y + V" simplifies. 00713 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) { 00714 // It does, we successfully reassociated! 00715 ++NumReassoc; 00716 return W; 00717 } 00718 } 00719 00720 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies. 00721 // For example, X - (X + 1) -> -1 00722 X = Op0; 00723 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z) 00724 // See if "V === X - Y" simplifies. 00725 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) 00726 // It does! Now see if "V - Z" simplifies. 00727 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) { 00728 // It does, we successfully reassociated! 00729 ++NumReassoc; 00730 return W; 00731 } 00732 // See if "V === X - Z" simplifies. 00733 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) 00734 // It does! Now see if "V - Y" simplifies. 00735 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) { 00736 // It does, we successfully reassociated! 00737 ++NumReassoc; 00738 return W; 00739 } 00740 } 00741 00742 // Z - (X - Y) -> (Z - X) + Y if everything simplifies. 00743 // For example, X - (X - Y) -> Y. 00744 Z = Op0; 00745 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y) 00746 // See if "V === Z - X" simplifies. 00747 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1)) 00748 // It does! Now see if "V + Y" simplifies. 00749 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) { 00750 // It does, we successfully reassociated! 
00751 ++NumReassoc; 00752 return W; 00753 } 00754 00755 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies. 00756 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) && 00757 match(Op1, m_Trunc(m_Value(Y)))) 00758 if (X->getType() == Y->getType()) 00759 // See if "V === X - Y" simplifies. 00760 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) 00761 // It does! Now see if "trunc V" simplifies. 00762 if (Value *W = SimplifyTruncInst(V, Op0->getType(), Q, MaxRecurse-1)) 00763 // It does, return the simplified "trunc V". 00764 return W; 00765 00766 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...). 00767 if (match(Op0, m_PtrToInt(m_Value(X))) && 00768 match(Op1, m_PtrToInt(m_Value(Y)))) 00769 if (Constant *Result = computePointerDifference(Q.DL, X, Y)) 00770 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true); 00771 00772 // i1 sub -> xor. 00773 if (MaxRecurse && Op0->getType()->isIntegerTy(1)) 00774 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) 00775 return V; 00776 00777 // Threading Sub over selects and phi nodes is pointless, so don't bother. 00778 // Threading over the select in "A - select(cond, B, C)" means evaluating 00779 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and 00780 // only if B and C are equal. If B and C are equal then (since we assume 00781 // that operands have already been simplified) "select(cond, B, C)" should 00782 // have been simplified to the common value of B and C already. Analysing 00783 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly 00784 // for threading over phi nodes. 00785 00786 return nullptr; 00787 } 00788 00789 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 00790 const DataLayout *DL, const TargetLibraryInfo *TLI, 00791 const DominatorTree *DT, AssumptionTracker *AT, 00792 const Instruction *CxtI) { 00793 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, 00794 Query (DL, TLI, DT, AT, CxtI), RecursionLimit); 00795 } 00796 00797 /// Given operands for an FAdd, see if we can fold the result. If not, this 00798 /// returns null. 00799 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 00800 const Query &Q, unsigned MaxRecurse) { 00801 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 00802 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 00803 Constant *Ops[] = { CLHS, CRHS }; 00804 return ConstantFoldInstOperands(Instruction::FAdd, CLHS->getType(), 00805 Ops, Q.DL, Q.TLI); 00806 } 00807 00808 // Canonicalize the constant to the RHS. 
00809 std::swap(Op0, Op1); 00810 } 00811 00812 // fadd X, -0 ==> X 00813 if (match(Op1, m_NegZero())) 00814 return Op0; 00815 00816 // fadd X, 0 ==> X, when we know X is not -0 00817 if (match(Op1, m_Zero()) && 00818 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0))) 00819 return Op0; 00820 00821 // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0 00822 // where nnan and ninf have to occur at least once somewhere in this 00823 // expression 00824 Value *SubOp = nullptr; 00825 if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0)))) 00826 SubOp = Op1; 00827 else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1)))) 00828 SubOp = Op0; 00829 if (SubOp) { 00830 Instruction *FSub = cast<Instruction>(SubOp); 00831 if ((FMF.noNaNs() || FSub->hasNoNaNs()) && 00832 (FMF.noInfs() || FSub->hasNoInfs())) 00833 return Constant::getNullValue(Op0->getType()); 00834 } 00835 00836 return nullptr; 00837 } 00838 00839 /// Given operands for an FSub, see if we can fold the result. If not, this 00840 /// returns null. 00841 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 00842 const Query &Q, unsigned MaxRecurse) { 00843 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 00844 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 00845 Constant *Ops[] = { CLHS, CRHS }; 00846 return ConstantFoldInstOperands(Instruction::FSub, CLHS->getType(), 00847 Ops, Q.DL, Q.TLI); 00848 } 00849 } 00850 00851 // fsub X, 0 ==> X 00852 if (match(Op1, m_Zero())) 00853 return Op0; 00854 00855 // fsub X, -0 ==> X, when we know X is not -0 00856 if (match(Op1, m_NegZero()) && 00857 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0))) 00858 return Op0; 00859 00860 // fsub 0, (fsub -0.0, X) ==> X 00861 Value *X; 00862 if (match(Op0, m_AnyZero())) { 00863 if (match(Op1, m_FSub(m_NegZero(), m_Value(X)))) 00864 return X; 00865 if (FMF.noSignedZeros() && match(Op1, m_FSub(m_AnyZero(), m_Value(X)))) 00866 return X; 00867 } 00868 00869 // fsub nnan ninf x, x ==> 0.0 00870 if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1) 00871 return Constant::getNullValue(Op0->getType()); 00872 00873 return nullptr; 00874 } 00875 00876 /// Given the operands for an FMul, see if we can fold the result 00877 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, 00878 FastMathFlags FMF, 00879 const Query &Q, 00880 unsigned MaxRecurse) { 00881 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 00882 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 00883 Constant *Ops[] = { CLHS, CRHS }; 00884 return ConstantFoldInstOperands(Instruction::FMul, CLHS->getType(), 00885 Ops, Q.DL, Q.TLI); 00886 } 00887 00888 // Canonicalize the constant to the RHS. 00889 std::swap(Op0, Op1); 00890 } 00891 00892 // fmul X, 1.0 ==> X 00893 if (match(Op1, m_FPOne())) 00894 return Op0; 00895 00896 // fmul nnan nsz X, 0 ==> 0 00897 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero())) 00898 return Op1; 00899 00900 return nullptr; 00901 } 00902 00903 /// SimplifyMulInst - Given operands for a Mul, see if we can 00904 /// fold the result. If not, this returns null. 00905 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q, 00906 unsigned MaxRecurse) { 00907 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 00908 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 00909 Constant *Ops[] = { CLHS, CRHS }; 00910 return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(), 00911 Ops, Q.DL, Q.TLI); 00912 } 00913 00914 // Canonicalize the constant to the RHS. 
00915 std::swap(Op0, Op1); 00916 } 00917 00918 // X * undef -> 0 00919 if (match(Op1, m_Undef())) 00920 return Constant::getNullValue(Op0->getType()); 00921 00922 // X * 0 -> 0 00923 if (match(Op1, m_Zero())) 00924 return Op1; 00925 00926 // X * 1 -> X 00927 if (match(Op1, m_One())) 00928 return Op0; 00929 00930 // (X / Y) * Y -> X if the division is exact. 00931 Value *X = nullptr; 00932 if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y 00933 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y) 00934 return X; 00935 00936 // i1 mul -> and. 00937 if (MaxRecurse && Op0->getType()->isIntegerTy(1)) 00938 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1)) 00939 return V; 00940 00941 // Try some generic simplifications for associative operations. 00942 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, 00943 MaxRecurse)) 00944 return V; 00945 00946 // Mul distributes over Add. Try some generic simplifications based on this. 00947 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add, 00948 Q, MaxRecurse)) 00949 return V; 00950 00951 // If the operation is with the result of a select instruction, check whether 00952 // operating on either branch of the select always yields the same value. 00953 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 00954 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, 00955 MaxRecurse)) 00956 return V; 00957 00958 // If the operation is with the result of a phi instruction, check whether 00959 // operating on all incoming values of the phi always yields the same value. 00960 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 00961 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, 00962 MaxRecurse)) 00963 return V; 00964 00965 return nullptr; 00966 } 00967 00968 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 00969 const DataLayout *DL, const TargetLibraryInfo *TLI, 00970 const DominatorTree *DT, AssumptionTracker *AT, 00971 const Instruction *CxtI) { 00972 return ::SimplifyFAddInst(Op0, Op1, FMF, Query (DL, TLI, DT, AT, CxtI), 00973 RecursionLimit); 00974 } 00975 00976 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 00977 const DataLayout *DL, const TargetLibraryInfo *TLI, 00978 const DominatorTree *DT, AssumptionTracker *AT, 00979 const Instruction *CxtI) { 00980 return ::SimplifyFSubInst(Op0, Op1, FMF, Query (DL, TLI, DT, AT, CxtI), 00981 RecursionLimit); 00982 } 00983 00984 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, 00985 FastMathFlags FMF, 00986 const DataLayout *DL, 00987 const TargetLibraryInfo *TLI, 00988 const DominatorTree *DT, 00989 AssumptionTracker *AT, 00990 const Instruction *CxtI) { 00991 return ::SimplifyFMulInst(Op0, Op1, FMF, Query (DL, TLI, DT, AT, CxtI), 00992 RecursionLimit); 00993 } 00994 00995 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL, 00996 const TargetLibraryInfo *TLI, 00997 const DominatorTree *DT, AssumptionTracker *AT, 00998 const Instruction *CxtI) { 00999 return ::SimplifyMulInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01000 RecursionLimit); 01001 } 01002 01003 /// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can 01004 /// fold the result. If not, this returns null. 
01005 static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, 01006 const Query &Q, unsigned MaxRecurse) { 01007 if (Constant *C0 = dyn_cast<Constant>(Op0)) { 01008 if (Constant *C1 = dyn_cast<Constant>(Op1)) { 01009 Constant *Ops[] = { C0, C1 }; 01010 return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI); 01011 } 01012 } 01013 01014 bool isSigned = Opcode == Instruction::SDiv; 01015 01016 // X / undef -> undef 01017 if (match(Op1, m_Undef())) 01018 return Op1; 01019 01020 // undef / X -> 0 01021 if (match(Op0, m_Undef())) 01022 return Constant::getNullValue(Op0->getType()); 01023 01024 // 0 / X -> 0, we don't need to preserve faults! 01025 if (match(Op0, m_Zero())) 01026 return Op0; 01027 01028 // X / 1 -> X 01029 if (match(Op1, m_One())) 01030 return Op0; 01031 01032 if (Op0->getType()->isIntegerTy(1)) 01033 // It can't be division by zero, hence it must be division by one. 01034 return Op0; 01035 01036 // X / X -> 1 01037 if (Op0 == Op1) 01038 return ConstantInt::get(Op0->getType(), 1); 01039 01040 // (X * Y) / Y -> X if the multiplication does not overflow. 01041 Value *X = nullptr, *Y = nullptr; 01042 if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) { 01043 if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1 01044 OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0); 01045 // If the Mul knows it does not overflow, then we are good to go. 01046 if ((isSigned && Mul->hasNoSignedWrap()) || 01047 (!isSigned && Mul->hasNoUnsignedWrap())) 01048 return X; 01049 // If X has the form X = A / Y then X * Y cannot overflow. 01050 if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X)) 01051 if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y) 01052 return X; 01053 } 01054 01055 // (X rem Y) / Y -> 0 01056 if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || 01057 (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1))))) 01058 return Constant::getNullValue(Op0->getType()); 01059 01060 // If the operation is with the result of a select instruction, check whether 01061 // operating on either branch of the select always yields the same value. 01062 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 01063 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 01064 return V; 01065 01066 // If the operation is with the result of a phi instruction, check whether 01067 // operating on all incoming values of the phi always yields the same value. 01068 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 01069 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 01070 return V; 01071 01072 return nullptr; 01073 } 01074 01075 /// SimplifySDivInst - Given operands for an SDiv, see if we can 01076 /// fold the result. If not, this returns null. 01077 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q, 01078 unsigned MaxRecurse) { 01079 if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse)) 01080 return V; 01081 01082 return nullptr; 01083 } 01084 01085 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL, 01086 const TargetLibraryInfo *TLI, 01087 const DominatorTree *DT, 01088 AssumptionTracker *AT, 01089 const Instruction *CxtI) { 01090 return ::SimplifySDivInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01091 RecursionLimit); 01092 } 01093 01094 /// SimplifyUDivInst - Given operands for a UDiv, see if we can 01095 /// fold the result. If not, this returns null. 
01096 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q, 01097 unsigned MaxRecurse) { 01098 if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse)) 01099 return V; 01100 01101 return nullptr; 01102 } 01103 01104 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL, 01105 const TargetLibraryInfo *TLI, 01106 const DominatorTree *DT, 01107 AssumptionTracker *AT, 01108 const Instruction *CxtI) { 01109 return ::SimplifyUDivInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01110 RecursionLimit); 01111 } 01112 01113 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q, 01114 unsigned) { 01115 // undef / X -> undef (the undef could be a snan). 01116 if (match(Op0, m_Undef())) 01117 return Op0; 01118 01119 // X / undef -> undef 01120 if (match(Op1, m_Undef())) 01121 return Op1; 01122 01123 return nullptr; 01124 } 01125 01126 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL, 01127 const TargetLibraryInfo *TLI, 01128 const DominatorTree *DT, 01129 AssumptionTracker *AT, 01130 const Instruction *CxtI) { 01131 return ::SimplifyFDivInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01132 RecursionLimit); 01133 } 01134 01135 /// SimplifyRem - Given operands for an SRem or URem, see if we can 01136 /// fold the result. If not, this returns null. 01137 static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, 01138 const Query &Q, unsigned MaxRecurse) { 01139 if (Constant *C0 = dyn_cast<Constant>(Op0)) { 01140 if (Constant *C1 = dyn_cast<Constant>(Op1)) { 01141 Constant *Ops[] = { C0, C1 }; 01142 return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI); 01143 } 01144 } 01145 01146 // X % undef -> undef 01147 if (match(Op1, m_Undef())) 01148 return Op1; 01149 01150 // undef % X -> 0 01151 if (match(Op0, m_Undef())) 01152 return Constant::getNullValue(Op0->getType()); 01153 01154 // 0 % X -> 0, we don't need to preserve faults! 01155 if (match(Op0, m_Zero())) 01156 return Op0; 01157 01158 // X % 0 -> undef, we don't need to preserve faults! 01159 if (match(Op1, m_Zero())) 01160 return UndefValue::get(Op0->getType()); 01161 01162 // X % 1 -> 0 01163 if (match(Op1, m_One())) 01164 return Constant::getNullValue(Op0->getType()); 01165 01166 if (Op0->getType()->isIntegerTy(1)) 01167 // It can't be remainder by zero, hence it must be remainder by one. 01168 return Constant::getNullValue(Op0->getType()); 01169 01170 // X % X -> 0 01171 if (Op0 == Op1) 01172 return Constant::getNullValue(Op0->getType()); 01173 01174 // (X % Y) % Y -> X % Y 01175 if ((Opcode == Instruction::SRem && 01176 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || 01177 (Opcode == Instruction::URem && 01178 match(Op0, m_URem(m_Value(), m_Specific(Op1))))) 01179 return Op0; 01180 01181 // If the operation is with the result of a select instruction, check whether 01182 // operating on either branch of the select always yields the same value. 01183 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 01184 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 01185 return V; 01186 01187 // If the operation is with the result of a phi instruction, check whether 01188 // operating on all incoming values of the phi always yields the same value. 
01189 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 01190 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 01191 return V; 01192 01193 return nullptr; 01194 } 01195 01196 /// SimplifySRemInst - Given operands for an SRem, see if we can 01197 /// fold the result. If not, this returns null. 01198 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q, 01199 unsigned MaxRecurse) { 01200 if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse)) 01201 return V; 01202 01203 return nullptr; 01204 } 01205 01206 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL, 01207 const TargetLibraryInfo *TLI, 01208 const DominatorTree *DT, 01209 AssumptionTracker *AT, 01210 const Instruction *CxtI) { 01211 return ::SimplifySRemInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01212 RecursionLimit); 01213 } 01214 01215 /// SimplifyURemInst - Given operands for a URem, see if we can 01216 /// fold the result. If not, this returns null. 01217 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q, 01218 unsigned MaxRecurse) { 01219 if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse)) 01220 return V; 01221 01222 return nullptr; 01223 } 01224 01225 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL, 01226 const TargetLibraryInfo *TLI, 01227 const DominatorTree *DT, 01228 AssumptionTracker *AT, 01229 const Instruction *CxtI) { 01230 return ::SimplifyURemInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01231 RecursionLimit); 01232 } 01233 01234 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &, 01235 unsigned) { 01236 // undef % X -> undef (the undef could be a snan). 01237 if (match(Op0, m_Undef())) 01238 return Op0; 01239 01240 // X % undef -> undef 01241 if (match(Op1, m_Undef())) 01242 return Op1; 01243 01244 return nullptr; 01245 } 01246 01247 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL, 01248 const TargetLibraryInfo *TLI, 01249 const DominatorTree *DT, 01250 AssumptionTracker *AT, 01251 const Instruction *CxtI) { 01252 return ::SimplifyFRemInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01253 RecursionLimit); 01254 } 01255 01256 /// isUndefShift - Returns true if a shift by \c Amount always yields undef. 01257 static bool isUndefShift(Value *Amount) { 01258 Constant *C = dyn_cast<Constant>(Amount); 01259 if (!C) 01260 return false; 01261 01262 // X shift by undef -> undef because it may shift by the bitwidth. 01263 if (isa<UndefValue>(C)) 01264 return true; 01265 01266 // Shifting by the bitwidth or more is undefined. 01267 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) 01268 if (CI->getValue().getLimitedValue() >= 01269 CI->getType()->getScalarSizeInBits()) 01270 return true; 01271 01272 // If all lanes of a vector shift are undefined the whole shift is. 01273 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) { 01274 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I) 01275 if (!isUndefShift(C->getAggregateElement(I))) 01276 return false; 01277 return true; 01278 } 01279 01280 return false; 01281 } 01282 01283 /// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can 01284 /// fold the result. If not, this returns null. 
01285 static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1, 01286 const Query &Q, unsigned MaxRecurse) { 01287 if (Constant *C0 = dyn_cast<Constant>(Op0)) { 01288 if (Constant *C1 = dyn_cast<Constant>(Op1)) { 01289 Constant *Ops[] = { C0, C1 }; 01290 return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI); 01291 } 01292 } 01293 01294 // 0 shift by X -> 0 01295 if (match(Op0, m_Zero())) 01296 return Op0; 01297 01298 // X shift by 0 -> X 01299 if (match(Op1, m_Zero())) 01300 return Op0; 01301 01302 // Fold undefined shifts. 01303 if (isUndefShift(Op1)) 01304 return UndefValue::get(Op0->getType()); 01305 01306 // If the operation is with the result of a select instruction, check whether 01307 // operating on either branch of the select always yields the same value. 01308 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 01309 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 01310 return V; 01311 01312 // If the operation is with the result of a phi instruction, check whether 01313 // operating on all incoming values of the phi always yields the same value. 01314 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 01315 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 01316 return V; 01317 01318 return nullptr; 01319 } 01320 01321 /// SimplifyShlInst - Given operands for an Shl, see if we can 01322 /// fold the result. If not, this returns null. 01323 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 01324 const Query &Q, unsigned MaxRecurse) { 01325 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse)) 01326 return V; 01327 01328 // undef << X -> 0 01329 if (match(Op0, m_Undef())) 01330 return Constant::getNullValue(Op0->getType()); 01331 01332 // (X >> A) << A -> X 01333 Value *X; 01334 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1))))) 01335 return X; 01336 return nullptr; 01337 } 01338 01339 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 01340 const DataLayout *DL, const TargetLibraryInfo *TLI, 01341 const DominatorTree *DT, AssumptionTracker *AT, 01342 const Instruction *CxtI) { 01343 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT, AT, CxtI), 01344 RecursionLimit); 01345 } 01346 01347 /// SimplifyLShrInst - Given operands for an LShr, see if we can 01348 /// fold the result. If not, this returns null. 01349 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, 01350 const Query &Q, unsigned MaxRecurse) { 01351 if (Value *V = SimplifyShift(Instruction::LShr, Op0, Op1, Q, MaxRecurse)) 01352 return V; 01353 01354 // X >> X -> 0 01355 if (Op0 == Op1) 01356 return Constant::getNullValue(Op0->getType()); 01357 01358 // undef >>l X -> 0 01359 if (match(Op0, m_Undef())) 01360 return Constant::getNullValue(Op0->getType()); 01361 01362 // (X << A) >> A -> X 01363 Value *X; 01364 if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1))) && 01365 cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap()) 01366 return X; 01367 01368 return nullptr; 01369 } 01370 01371 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, 01372 const DataLayout *DL, 01373 const TargetLibraryInfo *TLI, 01374 const DominatorTree *DT, 01375 AssumptionTracker *AT, 01376 const Instruction *CxtI) { 01377 return ::SimplifyLShrInst(Op0, Op1, isExact, Query (DL, TLI, DT, AT, CxtI), 01378 RecursionLimit); 01379 } 01380 01381 /// SimplifyAShrInst - Given operands for an AShr, see if we can 01382 /// fold the result. 
If not, this returns null. 01383 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, 01384 const Query &Q, unsigned MaxRecurse) { 01385 if (Value *V = SimplifyShift(Instruction::AShr, Op0, Op1, Q, MaxRecurse)) 01386 return V; 01387 01388 // X >> X -> 0 01389 if (Op0 == Op1) 01390 return Constant::getNullValue(Op0->getType()); 01391 01392 // all ones >>a X -> all ones 01393 if (match(Op0, m_AllOnes())) 01394 return Op0; 01395 01396 // undef >>a X -> all ones 01397 if (match(Op0, m_Undef())) 01398 return Constant::getAllOnesValue(Op0->getType()); 01399 01400 // (X << A) >> A -> X 01401 Value *X; 01402 if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1))) && 01403 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap()) 01404 return X; 01405 01406 // Arithmetic shifting an all-sign-bit value is a no-op. 01407 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AT, Q.CxtI, Q.DT); 01408 if (NumSignBits == Op0->getType()->getScalarSizeInBits()) 01409 return Op0; 01410 01411 return nullptr; 01412 } 01413 01414 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, 01415 const DataLayout *DL, 01416 const TargetLibraryInfo *TLI, 01417 const DominatorTree *DT, 01418 AssumptionTracker *AT, 01419 const Instruction *CxtI) { 01420 return ::SimplifyAShrInst(Op0, Op1, isExact, Query (DL, TLI, DT, AT, CxtI), 01421 RecursionLimit); 01422 } 01423 01424 // Simplify (and (icmp ...) (icmp ...)) to true when we can tell that the range 01425 // of possible values cannot be satisfied. 01426 static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) { 01427 ICmpInst::Predicate Pred0, Pred1; 01428 ConstantInt *CI1, *CI2; 01429 Value *V; 01430 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_ConstantInt(CI1)), 01431 m_ConstantInt(CI2)))) 01432 return nullptr; 01433 01434 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Specific(CI1)))) 01435 return nullptr; 01436 01437 Type *ITy = Op0->getType(); 01438 01439 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 01440 bool isNSW = AddInst->hasNoSignedWrap(); 01441 bool isNUW = AddInst->hasNoUnsignedWrap(); 01442 01443 const APInt &CI1V = CI1->getValue(); 01444 const APInt &CI2V = CI2->getValue(); 01445 const APInt Delta = CI2V - CI1V; 01446 if (CI1V.isStrictlyPositive()) { 01447 if (Delta == 2) { 01448 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) 01449 return getFalse(ITy); 01450 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) 01451 return getFalse(ITy); 01452 } 01453 if (Delta == 1) { 01454 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) 01455 return getFalse(ITy); 01456 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) 01457 return getFalse(ITy); 01458 } 01459 } 01460 if (CI1V.getBoolValue() && isNUW) { 01461 if (Delta == 2) 01462 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) 01463 return getFalse(ITy); 01464 if (Delta == 1) 01465 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) 01466 return getFalse(ITy); 01467 } 01468 01469 return nullptr; 01470 } 01471 01472 /// SimplifyAndInst - Given operands for an And, see if we can 01473 /// fold the result. If not, this returns null. 
01474 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q, 01475 unsigned MaxRecurse) { 01476 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 01477 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 01478 Constant *Ops[] = { CLHS, CRHS }; 01479 return ConstantFoldInstOperands(Instruction::And, CLHS->getType(), 01480 Ops, Q.DL, Q.TLI); 01481 } 01482 01483 // Canonicalize the constant to the RHS. 01484 std::swap(Op0, Op1); 01485 } 01486 01487 // X & undef -> 0 01488 if (match(Op1, m_Undef())) 01489 return Constant::getNullValue(Op0->getType()); 01490 01491 // X & X = X 01492 if (Op0 == Op1) 01493 return Op0; 01494 01495 // X & 0 = 0 01496 if (match(Op1, m_Zero())) 01497 return Op1; 01498 01499 // X & -1 = X 01500 if (match(Op1, m_AllOnes())) 01501 return Op0; 01502 01503 // A & ~A = ~A & A = 0 01504 if (match(Op0, m_Not(m_Specific(Op1))) || 01505 match(Op1, m_Not(m_Specific(Op0)))) 01506 return Constant::getNullValue(Op0->getType()); 01507 01508 // (A | ?) & A = A 01509 Value *A = nullptr, *B = nullptr; 01510 if (match(Op0, m_Or(m_Value(A), m_Value(B))) && 01511 (A == Op1 || B == Op1)) 01512 return Op1; 01513 01514 // A & (A | ?) = A 01515 if (match(Op1, m_Or(m_Value(A), m_Value(B))) && 01516 (A == Op0 || B == Op0)) 01517 return Op0; 01518 01519 // A & (-A) = A if A is a power of two or zero. 01520 if (match(Op0, m_Neg(m_Specific(Op1))) || 01521 match(Op1, m_Neg(m_Specific(Op0)))) { 01522 if (isKnownToBeAPowerOfTwo(Op0, /*OrZero*/true, 0, Q.AT, Q.CxtI, Q.DT)) 01523 return Op0; 01524 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/true, 0, Q.AT, Q.CxtI, Q.DT)) 01525 return Op1; 01526 } 01527 01528 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) { 01529 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) { 01530 if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS)) 01531 return V; 01532 if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS)) 01533 return V; 01534 } 01535 } 01536 01537 // Try some generic simplifications for associative operations. 01538 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 01539 MaxRecurse)) 01540 return V; 01541 01542 // And distributes over Or. Try some generic simplifications based on this. 01543 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 01544 Q, MaxRecurse)) 01545 return V; 01546 01547 // And distributes over Xor. Try some generic simplifications based on this. 01548 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 01549 Q, MaxRecurse)) 01550 return V; 01551 01552 // If the operation is with the result of a select instruction, check whether 01553 // operating on either branch of the select always yields the same value. 01554 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 01555 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 01556 MaxRecurse)) 01557 return V; 01558 01559 // If the operation is with the result of a phi instruction, check whether 01560 // operating on all incoming values of the phi always yields the same value. 01561 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 01562 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 01563 MaxRecurse)) 01564 return V; 01565 01566 return nullptr; 01567 } 01568 01569 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL, 01570 const TargetLibraryInfo *TLI, 01571 const DominatorTree *DT, AssumptionTracker *AT, 01572 const Instruction *CxtI) { 01573 return ::SimplifyAndInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01574 RecursionLimit); 01575 } 01576 01577 // Simplify (or (icmp ...) 
(icmp ...)) to true when we can tell that the union 01578 // contains all possible values. 01579 static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) { 01580 ICmpInst::Predicate Pred0, Pred1; 01581 ConstantInt *CI1, *CI2; 01582 Value *V; 01583 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_ConstantInt(CI1)), 01584 m_ConstantInt(CI2)))) 01585 return nullptr; 01586 01587 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Specific(CI1)))) 01588 return nullptr; 01589 01590 Type *ITy = Op0->getType(); 01591 01592 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 01593 bool isNSW = AddInst->hasNoSignedWrap(); 01594 bool isNUW = AddInst->hasNoUnsignedWrap(); 01595 01596 const APInt &CI1V = CI1->getValue(); 01597 const APInt &CI2V = CI2->getValue(); 01598 const APInt Delta = CI2V - CI1V; 01599 if (CI1V.isStrictlyPositive()) { 01600 if (Delta == 2) { 01601 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 01602 return getTrue(ITy); 01603 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) 01604 return getTrue(ITy); 01605 } 01606 if (Delta == 1) { 01607 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 01608 return getTrue(ITy); 01609 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 01610 return getTrue(ITy); 01611 } 01612 } 01613 if (CI1V.getBoolValue() && isNUW) { 01614 if (Delta == 2) 01615 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 01616 return getTrue(ITy); 01617 if (Delta == 1) 01618 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 01619 return getTrue(ITy); 01620 } 01621 01622 return nullptr; 01623 } 01624 01625 /// SimplifyOrInst - Given operands for an Or, see if we can 01626 /// fold the result. If not, this returns null. 01627 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q, 01628 unsigned MaxRecurse) { 01629 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 01630 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 01631 Constant *Ops[] = { CLHS, CRHS }; 01632 return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(), 01633 Ops, Q.DL, Q.TLI); 01634 } 01635 01636 // Canonicalize the constant to the RHS. 01637 std::swap(Op0, Op1); 01638 } 01639 01640 // X | undef -> -1 01641 if (match(Op1, m_Undef())) 01642 return Constant::getAllOnesValue(Op0->getType()); 01643 01644 // X | X = X 01645 if (Op0 == Op1) 01646 return Op0; 01647 01648 // X | 0 = X 01649 if (match(Op1, m_Zero())) 01650 return Op0; 01651 01652 // X | -1 = -1 01653 if (match(Op1, m_AllOnes())) 01654 return Op1; 01655 01656 // A | ~A = ~A | A = -1 01657 if (match(Op0, m_Not(m_Specific(Op1))) || 01658 match(Op1, m_Not(m_Specific(Op0)))) 01659 return Constant::getAllOnesValue(Op0->getType()); 01660 01661 // (A & ?) | A = A 01662 Value *A = nullptr, *B = nullptr; 01663 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 01664 (A == Op1 || B == Op1)) 01665 return Op1; 01666 01667 // A | (A & ?) = A 01668 if (match(Op1, m_And(m_Value(A), m_Value(B))) && 01669 (A == Op0 || B == Op0)) 01670 return Op0; 01671 01672 // ~(A & ?) | A = -1 01673 if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) && 01674 (A == Op1 || B == Op1)) 01675 return Constant::getAllOnesValue(Op1->getType()); 01676 01677 // A | ~(A & ?) 
= -1 01678 if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) && 01679 (A == Op0 || B == Op0)) 01680 return Constant::getAllOnesValue(Op0->getType()); 01681 01682 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) { 01683 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) { 01684 if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS)) 01685 return V; 01686 if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS)) 01687 return V; 01688 } 01689 } 01690 01691 // Try some generic simplifications for associative operations. 01692 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, 01693 MaxRecurse)) 01694 return V; 01695 01696 // Or distributes over And. Try some generic simplifications based on this. 01697 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q, 01698 MaxRecurse)) 01699 return V; 01700 01701 // If the operation is with the result of a select instruction, check whether 01702 // operating on either branch of the select always yields the same value. 01703 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 01704 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, 01705 MaxRecurse)) 01706 return V; 01707 01708 // (A & C)|(B & D) 01709 Value *C = nullptr, *D = nullptr; 01710 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 01711 match(Op1, m_And(m_Value(B), m_Value(D)))) { 01712 ConstantInt *C1 = dyn_cast<ConstantInt>(C); 01713 ConstantInt *C2 = dyn_cast<ConstantInt>(D); 01714 if (C1 && C2 && (C1->getValue() == ~C2->getValue())) { 01715 // (A & C1)|(B & C2) 01716 // If we have: ((V + N) & C1) | (V & C2) 01717 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0 01718 // replace with V+N. 01719 Value *V1, *V2; 01720 if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+ 01721 match(A, m_Add(m_Value(V1), m_Value(V2)))) { 01722 // Add commutes, try both ways. 01723 if (V1 == B && MaskedValueIsZero(V2, C2->getValue(), Q.DL, 01724 0, Q.AT, Q.CxtI, Q.DT)) 01725 return A; 01726 if (V2 == B && MaskedValueIsZero(V1, C2->getValue(), Q.DL, 01727 0, Q.AT, Q.CxtI, Q.DT)) 01728 return A; 01729 } 01730 // Or commutes, try both ways. 01731 if ((C1->getValue() & (C1->getValue() + 1)) == 0 && 01732 match(B, m_Add(m_Value(V1), m_Value(V2)))) { 01733 // Add commutes, try both ways. 01734 if (V1 == A && MaskedValueIsZero(V2, C1->getValue(), Q.DL, 01735 0, Q.AT, Q.CxtI, Q.DT)) 01736 return B; 01737 if (V2 == A && MaskedValueIsZero(V1, C1->getValue(), Q.DL, 01738 0, Q.AT, Q.CxtI, Q.DT)) 01739 return B; 01740 } 01741 } 01742 } 01743 01744 // If the operation is with the result of a phi instruction, check whether 01745 // operating on all incoming values of the phi always yields the same value. 01746 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 01747 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 01748 return V; 01749 01750 return nullptr; 01751 } 01752 01753 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL, 01754 const TargetLibraryInfo *TLI, 01755 const DominatorTree *DT, AssumptionTracker *AT, 01756 const Instruction *CxtI) { 01757 return ::SimplifyOrInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01758 RecursionLimit); 01759 } 01760 01761 /// SimplifyXorInst - Given operands for a Xor, see if we can 01762 /// fold the result. If not, this returns null. 
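// A concrete instance of the masked-add pattern handled in SimplifyOrInst
// above, with illustrative i8 constants:
//   or (and (add i8 %x, 16), 240), (and i8 %x, 15)  -->  add i8 %x, 16
// Here C1 = 240 (0xF0) is ~C2, C2 = 15 (0x0F) is a low-bit mask, and the
// added 16 has no bits in common with C2, so the two masked halves
// reassemble %x + 16.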
01763 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q, 01764 unsigned MaxRecurse) { 01765 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 01766 if (Constant *CRHS = dyn_cast<Constant>(Op1)) { 01767 Constant *Ops[] = { CLHS, CRHS }; 01768 return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(), 01769 Ops, Q.DL, Q.TLI); 01770 } 01771 01772 // Canonicalize the constant to the RHS. 01773 std::swap(Op0, Op1); 01774 } 01775 01776 // A ^ undef -> undef 01777 if (match(Op1, m_Undef())) 01778 return Op1; 01779 01780 // A ^ 0 = A 01781 if (match(Op1, m_Zero())) 01782 return Op0; 01783 01784 // A ^ A = 0 01785 if (Op0 == Op1) 01786 return Constant::getNullValue(Op0->getType()); 01787 01788 // A ^ ~A = ~A ^ A = -1 01789 if (match(Op0, m_Not(m_Specific(Op1))) || 01790 match(Op1, m_Not(m_Specific(Op0)))) 01791 return Constant::getAllOnesValue(Op0->getType()); 01792 01793 // Try some generic simplifications for associative operations. 01794 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 01795 MaxRecurse)) 01796 return V; 01797 01798 // Threading Xor over selects and phi nodes is pointless, so don't bother. 01799 // Threading over the select in "A ^ select(cond, B, C)" means evaluating 01800 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and 01801 // only if B and C are equal. If B and C are equal then (since we assume 01802 // that operands have already been simplified) "select(cond, B, C)" should 01803 // have been simplified to the common value of B and C already. Analysing 01804 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly 01805 // for threading over phi nodes. 01806 01807 return nullptr; 01808 } 01809 01810 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL, 01811 const TargetLibraryInfo *TLI, 01812 const DominatorTree *DT, AssumptionTracker *AT, 01813 const Instruction *CxtI) { 01814 return ::SimplifyXorInst(Op0, Op1, Query (DL, TLI, DT, AT, CxtI), 01815 RecursionLimit); 01816 } 01817 01818 static Type *GetCompareTy(Value *Op) { 01819 return CmpInst::makeCmpResultType(Op->getType()); 01820 } 01821 01822 /// ExtractEquivalentCondition - Rummage around inside V looking for something 01823 /// equivalent to the comparison "LHS Pred RHS". Return such a value if found, 01824 /// otherwise return null. Helper function for analyzing max/min idioms. 01825 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, 01826 Value *LHS, Value *RHS) { 01827 SelectInst *SI = dyn_cast<SelectInst>(V); 01828 if (!SI) 01829 return nullptr; 01830 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 01831 if (!Cmp) 01832 return nullptr; 01833 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); 01834 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) 01835 return Cmp; 01836 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && 01837 LHS == CmpRHS && RHS == CmpLHS) 01838 return Cmp; 01839 return nullptr; 01840 } 01841 01842 // A significant optimization not implemented here is assuming that alloca 01843 // addresses are not equal to incoming argument values. They don't *alias*, 01844 // as we say, but that doesn't mean they aren't equal, so we take a 01845 // conservative approach. 01846 // 01847 // This is inspired in part by C++11 5.10p1: 01848 // "Two pointers of the same type compare equal if and only if they are both 01849 // null, both point to the same function, or both represent the same 01850 // address." 
01851 // 01852 // This is pretty permissive. 01853 // 01854 // It's also partly due to C11 6.5.9p6: 01855 // "Two pointers compare equal if and only if both are null pointers, both are 01856 // pointers to the same object (including a pointer to an object and a 01857 // subobject at its beginning) or function, both are pointers to one past the 01858 // last element of the same array object, or one is a pointer to one past the 01859 // end of one array object and the other is a pointer to the start of a 01860 // different array object that happens to immediately follow the first array 01861 // object in the address space.) 01862 // 01863 // C11's version is more restrictive, however there's no reason why an argument 01864 // couldn't be a one-past-the-end value for a stack object in the caller and be 01865 // equal to the beginning of a stack object in the callee. 01866 // 01867 // If the C and C++ standards are ever made sufficiently restrictive in this 01868 // area, it may be possible to update LLVM's semantics accordingly and reinstate 01869 // this optimization. 01870 static Constant *computePointerICmp(const DataLayout *DL, 01871 const TargetLibraryInfo *TLI, 01872 CmpInst::Predicate Pred, 01873 Value *LHS, Value *RHS) { 01874 // First, skip past any trivial no-ops. 01875 LHS = LHS->stripPointerCasts(); 01876 RHS = RHS->stripPointerCasts(); 01877 01878 // A non-null pointer is not equal to a null pointer. 01879 if (llvm::isKnownNonNull(LHS, TLI) && isa<ConstantPointerNull>(RHS) && 01880 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE)) 01881 return ConstantInt::get(GetCompareTy(LHS), 01882 !CmpInst::isTrueWhenEqual(Pred)); 01883 01884 // We can only fold certain predicates on pointer comparisons. 01885 switch (Pred) { 01886 default: 01887 return nullptr; 01888 01889 // Equality comaprisons are easy to fold. 01890 case CmpInst::ICMP_EQ: 01891 case CmpInst::ICMP_NE: 01892 break; 01893 01894 // We can only handle unsigned relational comparisons because 'inbounds' on 01895 // a GEP only protects against unsigned wrapping. 01896 case CmpInst::ICMP_UGT: 01897 case CmpInst::ICMP_UGE: 01898 case CmpInst::ICMP_ULT: 01899 case CmpInst::ICMP_ULE: 01900 // However, we have to switch them to their signed variants to handle 01901 // negative indices from the base pointer. 01902 Pred = ICmpInst::getSignedPredicate(Pred); 01903 break; 01904 } 01905 01906 // Strip off any constant offsets so that we can reason about them. 01907 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets 01908 // here and compare base addresses like AliasAnalysis does, however there are 01909 // numerous hazards. AliasAnalysis and its utilities rely on special rules 01910 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis 01911 // doesn't need to guarantee pointer inequality when it says NoAlias. 01912 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS); 01913 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS); 01914 01915 // If LHS and RHS are related via constant offsets to the same base 01916 // value, we can replace it with an icmp which just compares the offsets. 01917 if (LHS == RHS) 01918 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset); 01919 01920 // Various optimizations for (in)equality comparisons. 01921 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { 01922 // Different non-empty allocations that exist at the same time have 01923 // different addresses (if the program can tell). 
Global variables always 01924 // exist, so they always exist during the lifetime of each other and all 01925 // allocas. Two different allocas usually have different addresses... 01926 // 01927 // However, if there's an @llvm.stackrestore dynamically in between two 01928 // allocas, they may have the same address. It's tempting to reduce the 01929 // scope of the problem by only looking at *static* allocas here. That would 01930 // cover the majority of allocas while significantly reducing the likelihood 01931 // of having an @llvm.stackrestore pop up in the middle. However, it's not 01932 // actually impossible for an @llvm.stackrestore to pop up in the middle of 01933 // an entry block. Also, if we have a block that's not attached to a 01934 // function, we can't tell if it's "static" under the current definition. 01935 // Theoretically, this problem could be fixed by creating a new kind of 01936 // instruction kind specifically for static allocas. Such a new instruction 01937 // could be required to be at the top of the entry block, thus preventing it 01938 // from being subject to a @llvm.stackrestore. Instcombine could even 01939 // convert regular allocas into these special allocas. It'd be nifty. 01940 // However, until then, this problem remains open. 01941 // 01942 // So, we'll assume that two non-empty allocas have different addresses 01943 // for now. 01944 // 01945 // With all that, if the offsets are within the bounds of their allocations 01946 // (and not one-past-the-end! so we can't use inbounds!), and their 01947 // allocations aren't the same, the pointers are not equal. 01948 // 01949 // Note that it's not necessary to check for LHS being a global variable 01950 // address, due to canonicalization and constant folding. 01951 if (isa<AllocaInst>(LHS) && 01952 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) { 01953 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset); 01954 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset); 01955 uint64_t LHSSize, RHSSize; 01956 if (LHSOffsetCI && RHSOffsetCI && 01957 getObjectSize(LHS, LHSSize, DL, TLI) && 01958 getObjectSize(RHS, RHSSize, DL, TLI)) { 01959 const APInt &LHSOffsetValue = LHSOffsetCI->getValue(); 01960 const APInt &RHSOffsetValue = RHSOffsetCI->getValue(); 01961 if (!LHSOffsetValue.isNegative() && 01962 !RHSOffsetValue.isNegative() && 01963 LHSOffsetValue.ult(LHSSize) && 01964 RHSOffsetValue.ult(RHSSize)) { 01965 return ConstantInt::get(GetCompareTy(LHS), 01966 !CmpInst::isTrueWhenEqual(Pred)); 01967 } 01968 } 01969 01970 // Repeat the above check but this time without depending on DataLayout 01971 // or being able to compute a precise size. 01972 if (!cast<PointerType>(LHS->getType())->isEmptyTy() && 01973 !cast<PointerType>(RHS->getType())->isEmptyTy() && 01974 LHSOffset->isNullValue() && 01975 RHSOffset->isNullValue()) 01976 return ConstantInt::get(GetCompareTy(LHS), 01977 !CmpInst::isTrueWhenEqual(Pred)); 01978 } 01979 01980 // Even if an non-inbounds GEP occurs along the path we can still optimize 01981 // equality comparisons concerning the result. We avoid walking the whole 01982 // chain again by starting where the last calls to 01983 // stripAndComputeConstantOffsets left off and accumulate the offsets. 
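// The code below redoes the offset comparison while also looking through
// non-inbounds GEPs. For instance (given DataLayout),
//   icmp eq (getelementptr i8* %p, i64 1), %p
// reduces to comparing the accumulated constant offsets 1 and 0 and folds to
// false even though the GEP is not marked inbounds.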
01984 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true); 01985 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true); 01986 if (LHS == RHS) 01987 return ConstantExpr::getICmp(Pred, 01988 ConstantExpr::getAdd(LHSOffset, LHSNoBound), 01989 ConstantExpr::getAdd(RHSOffset, RHSNoBound)); 01990 } 01991 01992 // Otherwise, fail. 01993 return nullptr; 01994 } 01995 01996 /// SimplifyICmpInst - Given operands for an ICmpInst, see if we can 01997 /// fold the result. If not, this returns null. 01998 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 01999 const Query &Q, unsigned MaxRecurse) { 02000 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 02001 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 02002 02003 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 02004 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 02005 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 02006 02007 // If we have a constant, make sure it is on the RHS. 02008 std::swap(LHS, RHS); 02009 Pred = CmpInst::getSwappedPredicate(Pred); 02010 } 02011 02012 Type *ITy = GetCompareTy(LHS); // The return type. 02013 Type *OpTy = LHS->getType(); // The operand type. 02014 02015 // icmp X, X -> true/false 02016 // X icmp undef -> true/false. For example, icmp ugt %X, undef -> false 02017 // because X could be 0. 02018 if (LHS == RHS || isa<UndefValue>(RHS)) 02019 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 02020 02021 // Special case logic when the operands have i1 type. 02022 if (OpTy->getScalarType()->isIntegerTy(1)) { 02023 switch (Pred) { 02024 default: break; 02025 case ICmpInst::ICMP_EQ: 02026 // X == 1 -> X 02027 if (match(RHS, m_One())) 02028 return LHS; 02029 break; 02030 case ICmpInst::ICMP_NE: 02031 // X != 0 -> X 02032 if (match(RHS, m_Zero())) 02033 return LHS; 02034 break; 02035 case ICmpInst::ICMP_UGT: 02036 // X >u 0 -> X 02037 if (match(RHS, m_Zero())) 02038 return LHS; 02039 break; 02040 case ICmpInst::ICMP_UGE: 02041 // X >=u 1 -> X 02042 if (match(RHS, m_One())) 02043 return LHS; 02044 break; 02045 case ICmpInst::ICMP_SLT: 02046 // X <s 0 -> X 02047 if (match(RHS, m_Zero())) 02048 return LHS; 02049 break; 02050 case ICmpInst::ICMP_SLE: 02051 // X <=s -1 -> X 02052 if (match(RHS, m_One())) 02053 return LHS; 02054 break; 02055 } 02056 } 02057 02058 // If we are comparing with zero then try hard since this is a common case. 
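// A few of the zero-comparison cases handled below, for illustration:
//   icmp ult i32 %x, 0  -->  false   (nothing is unsigned-less-than zero)
//   icmp uge i32 %x, 0  -->  true
//   icmp eq  i32 %x, 0  -->  false   when %x is known to be non-zero
//   icmp slt i32 %x, 0  -->  true or false when the sign bit of %x is known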
02059 if (match(RHS, m_Zero())) { 02060 bool LHSKnownNonNegative, LHSKnownNegative; 02061 switch (Pred) { 02062 default: llvm_unreachable("Unknown ICmp predicate!"); 02063 case ICmpInst::ICMP_ULT: 02064 return getFalse(ITy); 02065 case ICmpInst::ICMP_UGE: 02066 return getTrue(ITy); 02067 case ICmpInst::ICMP_EQ: 02068 case ICmpInst::ICMP_ULE: 02069 if (isKnownNonZero(LHS, Q.DL, 0, Q.AT, Q.CxtI, Q.DT)) 02070 return getFalse(ITy); 02071 break; 02072 case ICmpInst::ICMP_NE: 02073 case ICmpInst::ICMP_UGT: 02074 if (isKnownNonZero(LHS, Q.DL, 0, Q.AT, Q.CxtI, Q.DT)) 02075 return getTrue(ITy); 02076 break; 02077 case ICmpInst::ICMP_SLT: 02078 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 02079 0, Q.AT, Q.CxtI, Q.DT); 02080 if (LHSKnownNegative) 02081 return getTrue(ITy); 02082 if (LHSKnownNonNegative) 02083 return getFalse(ITy); 02084 break; 02085 case ICmpInst::ICMP_SLE: 02086 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 02087 0, Q.AT, Q.CxtI, Q.DT); 02088 if (LHSKnownNegative) 02089 return getTrue(ITy); 02090 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 02091 0, Q.AT, Q.CxtI, Q.DT)) 02092 return getFalse(ITy); 02093 break; 02094 case ICmpInst::ICMP_SGE: 02095 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 02096 0, Q.AT, Q.CxtI, Q.DT); 02097 if (LHSKnownNegative) 02098 return getFalse(ITy); 02099 if (LHSKnownNonNegative) 02100 return getTrue(ITy); 02101 break; 02102 case ICmpInst::ICMP_SGT: 02103 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 02104 0, Q.AT, Q.CxtI, Q.DT); 02105 if (LHSKnownNegative) 02106 return getFalse(ITy); 02107 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 02108 0, Q.AT, Q.CxtI, Q.DT)) 02109 return getTrue(ITy); 02110 break; 02111 } 02112 } 02113 02114 // See if we are doing a comparison with a constant integer. 02115 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 02116 // Rule out tautological comparisons (eg., ult 0 or uge 0). 02117 ConstantRange RHS_CR = ICmpInst::makeConstantRange(Pred, CI->getValue()); 02118 if (RHS_CR.isEmptySet()) 02119 return ConstantInt::getFalse(CI->getContext()); 02120 if (RHS_CR.isFullSet()) 02121 return ConstantInt::getTrue(CI->getContext()); 02122 02123 // Many binary operators with constant RHS have easy to compute constant 02124 // range. Use them to check whether the comparison is a tautology. 02125 unsigned Width = CI->getBitWidth(); 02126 APInt Lower = APInt(Width, 0); 02127 APInt Upper = APInt(Width, 0); 02128 ConstantInt *CI2; 02129 if (match(LHS, m_URem(m_Value(), m_ConstantInt(CI2)))) { 02130 // 'urem x, CI2' produces [0, CI2). 02131 Upper = CI2->getValue(); 02132 } else if (match(LHS, m_SRem(m_Value(), m_ConstantInt(CI2)))) { 02133 // 'srem x, CI2' produces (-|CI2|, |CI2|). 02134 Upper = CI2->getValue().abs(); 02135 Lower = (-Upper) + 1; 02136 } else if (match(LHS, m_UDiv(m_ConstantInt(CI2), m_Value()))) { 02137 // 'udiv CI2, x' produces [0, CI2]. 02138 Upper = CI2->getValue() + 1; 02139 } else if (match(LHS, m_UDiv(m_Value(), m_ConstantInt(CI2)))) { 02140 // 'udiv x, CI2' produces [0, UINT_MAX / CI2]. 02141 APInt NegOne = APInt::getAllOnesValue(Width); 02142 if (!CI2->isZero()) 02143 Upper = NegOne.udiv(CI2->getValue()) + 1; 02144 } else if (match(LHS, m_SDiv(m_ConstantInt(CI2), m_Value()))) { 02145 if (CI2->isMinSignedValue()) { 02146 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2]. 02147 Lower = CI2->getValue(); 02148 Upper = Lower.lshr(1) + 1; 02149 } else { 02150 // 'sdiv CI2, x' produces [-|CI2|, |CI2|]. 
02151 Upper = CI2->getValue().abs() + 1; 02152 Lower = (-Upper) + 1; 02153 } 02154 } else if (match(LHS, m_SDiv(m_Value(), m_ConstantInt(CI2)))) { 02155 APInt IntMin = APInt::getSignedMinValue(Width); 02156 APInt IntMax = APInt::getSignedMaxValue(Width); 02157 APInt Val = CI2->getValue(); 02158 if (Val.isAllOnesValue()) { 02159 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX] 02160 // where CI2 != -1 and CI2 != 0 and CI2 != 1 02161 Lower = IntMin + 1; 02162 Upper = IntMax + 1; 02163 } else if (Val.countLeadingZeros() < Width - 1) { 02164 // 'sdiv x, CI2' produces [INT_MIN / CI2, INT_MAX / CI2] 02165 // where CI2 != -1 and CI2 != 0 and CI2 != 1 02166 Lower = IntMin.sdiv(Val); 02167 Upper = IntMax.sdiv(Val); 02168 if (Lower.sgt(Upper)) 02169 std::swap(Lower, Upper); 02170 Upper = Upper + 1; 02171 assert(Upper != Lower && "Upper part of range has wrapped!"); 02172 } 02173 } else if (match(LHS, m_NUWShl(m_ConstantInt(CI2), m_Value()))) { 02174 // 'shl nuw CI2, x' produces [CI2, CI2 << CLZ(CI2)] 02175 Lower = CI2->getValue(); 02176 Upper = Lower.shl(Lower.countLeadingZeros()) + 1; 02177 } else if (match(LHS, m_NSWShl(m_ConstantInt(CI2), m_Value()))) { 02178 if (CI2->isNegative()) { 02179 // 'shl nsw CI2, x' produces [CI2 << CLO(CI2)-1, CI2] 02180 unsigned ShiftAmount = CI2->getValue().countLeadingOnes() - 1; 02181 Lower = CI2->getValue().shl(ShiftAmount); 02182 Upper = CI2->getValue() + 1; 02183 } else { 02184 // 'shl nsw CI2, x' produces [CI2, CI2 << CLZ(CI2)-1] 02185 unsigned ShiftAmount = CI2->getValue().countLeadingZeros() - 1; 02186 Lower = CI2->getValue(); 02187 Upper = CI2->getValue().shl(ShiftAmount) + 1; 02188 } 02189 } else if (match(LHS, m_LShr(m_Value(), m_ConstantInt(CI2)))) { 02190 // 'lshr x, CI2' produces [0, UINT_MAX >> CI2]. 02191 APInt NegOne = APInt::getAllOnesValue(Width); 02192 if (CI2->getValue().ult(Width)) 02193 Upper = NegOne.lshr(CI2->getValue()) + 1; 02194 } else if (match(LHS, m_LShr(m_ConstantInt(CI2), m_Value()))) { 02195 // 'lshr CI2, x' produces [CI2 >> (Width-1), CI2]. 02196 unsigned ShiftAmount = Width - 1; 02197 if (!CI2->isZero() && cast<BinaryOperator>(LHS)->isExact()) 02198 ShiftAmount = CI2->getValue().countTrailingZeros(); 02199 Lower = CI2->getValue().lshr(ShiftAmount); 02200 Upper = CI2->getValue() + 1; 02201 } else if (match(LHS, m_AShr(m_Value(), m_ConstantInt(CI2)))) { 02202 // 'ashr x, CI2' produces [INT_MIN >> CI2, INT_MAX >> CI2]. 02203 APInt IntMin = APInt::getSignedMinValue(Width); 02204 APInt IntMax = APInt::getSignedMaxValue(Width); 02205 if (CI2->getValue().ult(Width)) { 02206 Lower = IntMin.ashr(CI2->getValue()); 02207 Upper = IntMax.ashr(CI2->getValue()) + 1; 02208 } 02209 } else if (match(LHS, m_AShr(m_ConstantInt(CI2), m_Value()))) { 02210 unsigned ShiftAmount = Width - 1; 02211 if (!CI2->isZero() && cast<BinaryOperator>(LHS)->isExact()) 02212 ShiftAmount = CI2->getValue().countTrailingZeros(); 02213 if (CI2->isNegative()) { 02214 // 'ashr CI2, x' produces [CI2, CI2 >> (Width-1)] 02215 Lower = CI2->getValue(); 02216 Upper = CI2->getValue().ashr(ShiftAmount) + 1; 02217 } else { 02218 // 'ashr CI2, x' produces [CI2 >> (Width-1), CI2] 02219 Lower = CI2->getValue().ashr(ShiftAmount); 02220 Upper = CI2->getValue() + 1; 02221 } 02222 } else if (match(LHS, m_Or(m_Value(), m_ConstantInt(CI2)))) { 02223 // 'or x, CI2' produces [CI2, UINT_MAX]. 02224 Lower = CI2->getValue(); 02225 } else if (match(LHS, m_And(m_Value(), m_ConstantInt(CI2)))) { 02226 // 'and x, CI2' produces [0, CI2]. 
02227 Upper = CI2->getValue() + 1; 02228 } 02229 if (Lower != Upper) { 02230 ConstantRange LHS_CR = ConstantRange(Lower, Upper); 02231 if (RHS_CR.contains(LHS_CR)) 02232 return ConstantInt::getTrue(RHS->getContext()); 02233 if (RHS_CR.inverse().contains(LHS_CR)) 02234 return ConstantInt::getFalse(RHS->getContext()); 02235 } 02236 } 02237 02238 // Compare of cast, for example (zext X) != 0 -> X != 0 02239 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 02240 Instruction *LI = cast<CastInst>(LHS); 02241 Value *SrcOp = LI->getOperand(0); 02242 Type *SrcTy = SrcOp->getType(); 02243 Type *DstTy = LI->getType(); 02244 02245 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 02246 // if the integer type is the same size as the pointer type. 02247 if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) && 02248 Q.DL->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 02249 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 02250 // Transfer the cast to the constant. 02251 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 02252 ConstantExpr::getIntToPtr(RHSC, SrcTy), 02253 Q, MaxRecurse-1)) 02254 return V; 02255 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 02256 if (RI->getOperand(0)->getType() == SrcTy) 02257 // Compare without the cast. 02258 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 02259 Q, MaxRecurse-1)) 02260 return V; 02261 } 02262 } 02263 02264 if (isa<ZExtInst>(LHS)) { 02265 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the 02266 // same type. 02267 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) { 02268 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 02269 // Compare X and Y. Note that signed predicates become unsigned. 02270 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 02271 SrcOp, RI->getOperand(0), Q, 02272 MaxRecurse-1)) 02273 return V; 02274 } 02275 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended 02276 // too. If not, then try to deduce the result of the comparison. 02277 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 02278 // Compute the constant that would happen if we truncated to SrcTy then 02279 // reextended to DstTy. 02280 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 02281 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy); 02282 02283 // If the re-extended constant didn't change then this is effectively 02284 // also a case of comparing two zero-extended values. 02285 if (RExt == CI && MaxRecurse) 02286 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 02287 SrcOp, Trunc, Q, MaxRecurse-1)) 02288 return V; 02289 02290 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit 02291 // there. Use this to work out the result of the comparison. 02292 if (RExt != CI) { 02293 switch (Pred) { 02294 default: llvm_unreachable("Unknown ICmp predicate!"); 02295 // LHS <u RHS. 02296 case ICmpInst::ICMP_EQ: 02297 case ICmpInst::ICMP_UGT: 02298 case ICmpInst::ICMP_UGE: 02299 return ConstantInt::getFalse(CI->getContext()); 02300 02301 case ICmpInst::ICMP_NE: 02302 case ICmpInst::ICMP_ULT: 02303 case ICmpInst::ICMP_ULE: 02304 return ConstantInt::getTrue(CI->getContext()); 02305 02306 // LHS is non-negative. If RHS is negative then LHS >s LHS. If RHS 02307 // is non-negative then LHS <s RHS. 02308 case ICmpInst::ICMP_SGT: 02309 case ICmpInst::ICMP_SGE: 02310 return CI->getValue().isNegative() ? 
02311 ConstantInt::getTrue(CI->getContext()) : 02312 ConstantInt::getFalse(CI->getContext()); 02313 02314 case ICmpInst::ICMP_SLT: 02315 case ICmpInst::ICMP_SLE: 02316 return CI->getValue().isNegative() ? 02317 ConstantInt::getFalse(CI->getContext()) : 02318 ConstantInt::getTrue(CI->getContext()); 02319 } 02320 } 02321 } 02322 } 02323 02324 if (isa<SExtInst>(LHS)) { 02325 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the 02326 // same type. 02327 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) { 02328 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 02329 // Compare X and Y. Note that the predicate does not change. 02330 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 02331 Q, MaxRecurse-1)) 02332 return V; 02333 } 02334 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended 02335 // too. If not, then try to deduce the result of the comparison. 02336 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 02337 // Compute the constant that would happen if we truncated to SrcTy then 02338 // reextended to DstTy. 02339 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 02340 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy); 02341 02342 // If the re-extended constant didn't change then this is effectively 02343 // also a case of comparing two sign-extended values. 02344 if (RExt == CI && MaxRecurse) 02345 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1)) 02346 return V; 02347 02348 // Otherwise the upper bits of LHS are all equal, while RHS has varying 02349 // bits there. Use this to work out the result of the comparison. 02350 if (RExt != CI) { 02351 switch (Pred) { 02352 default: llvm_unreachable("Unknown ICmp predicate!"); 02353 case ICmpInst::ICMP_EQ: 02354 return ConstantInt::getFalse(CI->getContext()); 02355 case ICmpInst::ICMP_NE: 02356 return ConstantInt::getTrue(CI->getContext()); 02357 02358 // If RHS is non-negative then LHS <s RHS. If RHS is negative then 02359 // LHS >s RHS. 02360 case ICmpInst::ICMP_SGT: 02361 case ICmpInst::ICMP_SGE: 02362 return CI->getValue().isNegative() ? 02363 ConstantInt::getTrue(CI->getContext()) : 02364 ConstantInt::getFalse(CI->getContext()); 02365 case ICmpInst::ICMP_SLT: 02366 case ICmpInst::ICMP_SLE: 02367 return CI->getValue().isNegative() ? 02368 ConstantInt::getFalse(CI->getContext()) : 02369 ConstantInt::getTrue(CI->getContext()); 02370 02371 // If LHS is non-negative then LHS <u RHS. If LHS is negative then 02372 // LHS >u RHS. 02373 case ICmpInst::ICMP_UGT: 02374 case ICmpInst::ICMP_UGE: 02375 // Comparison is true iff the LHS <s 0. 02376 if (MaxRecurse) 02377 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp, 02378 Constant::getNullValue(SrcTy), 02379 Q, MaxRecurse-1)) 02380 return V; 02381 break; 02382 case ICmpInst::ICMP_ULT: 02383 case ICmpInst::ICMP_ULE: 02384 // Comparison is true iff the LHS >=s 0. 02385 if (MaxRecurse) 02386 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp, 02387 Constant::getNullValue(SrcTy), 02388 Q, MaxRecurse-1)) 02389 return V; 02390 break; 02391 } 02392 } 02393 } 02394 } 02395 } 02396 02397 // If a bit is known to be zero for A and known to be one for B, 02398 // then A and B cannot be equal. 
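// For example, icmp eq (shl i32 %x, 1), 3 folds to false here: bit 0 of the
// shifted value is known to be zero while bit 0 of the constant 3 is one, so
// the two values can never be equal (the analogous ne compare folds to true).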
02399 if (ICmpInst::isEquality(Pred)) { 02400 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 02401 uint32_t BitWidth = CI->getBitWidth(); 02402 APInt LHSKnownZero(BitWidth, 0); 02403 APInt LHSKnownOne(BitWidth, 0); 02404 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, 02405 0, Q.AT, Q.CxtI, Q.DT); 02406 APInt RHSKnownZero(BitWidth, 0); 02407 APInt RHSKnownOne(BitWidth, 0); 02408 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, Q.DL, 02409 0, Q.AT, Q.CxtI, Q.DT); 02410 if (((LHSKnownOne & RHSKnownZero) != 0) || 02411 ((LHSKnownZero & RHSKnownOne) != 0)) 02412 return (Pred == ICmpInst::ICMP_EQ) 02413 ? ConstantInt::getFalse(CI->getContext()) 02414 : ConstantInt::getTrue(CI->getContext()); 02415 } 02416 } 02417 02418 // Special logic for binary operators. 02419 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS); 02420 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS); 02421 if (MaxRecurse && (LBO || RBO)) { 02422 // Analyze the case when either LHS or RHS is an add instruction. 02423 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 02424 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). 02425 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 02426 if (LBO && LBO->getOpcode() == Instruction::Add) { 02427 A = LBO->getOperand(0); B = LBO->getOperand(1); 02428 NoLHSWrapProblem = ICmpInst::isEquality(Pred) || 02429 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) || 02430 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap()); 02431 } 02432 if (RBO && RBO->getOpcode() == Instruction::Add) { 02433 C = RBO->getOperand(0); D = RBO->getOperand(1); 02434 NoRHSWrapProblem = ICmpInst::isEquality(Pred) || 02435 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) || 02436 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap()); 02437 } 02438 02439 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 02440 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 02441 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 02442 Constant::getNullValue(RHS->getType()), 02443 Q, MaxRecurse-1)) 02444 return V; 02445 02446 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 02447 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 02448 if (Value *V = SimplifyICmpInst(Pred, 02449 Constant::getNullValue(LHS->getType()), 02450 C == LHS ? D : C, Q, MaxRecurse-1)) 02451 return V; 02452 02453 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 02454 if (A && C && (A == C || A == D || B == C || B == D) && 02455 NoLHSWrapProblem && NoRHSWrapProblem) { 02456 // Determine Y and Z in the form icmp (X+Y), (X+Z). 
02457 Value *Y, *Z; 02458 if (A == C) { 02459 // C + B == C + D -> B == D 02460 Y = B; 02461 Z = D; 02462 } else if (A == D) { 02463 // D + B == C + D -> B == C 02464 Y = B; 02465 Z = C; 02466 } else if (B == C) { 02467 // A + C == C + D -> A == D 02468 Y = A; 02469 Z = D; 02470 } else { 02471 assert(B == D); 02472 // A + D == C + D -> A == C 02473 Y = A; 02474 Z = C; 02475 } 02476 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1)) 02477 return V; 02478 } 02479 } 02480 02481 // 0 - (zext X) pred C 02482 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 02483 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 02484 if (RHSC->getValue().isStrictlyPositive()) { 02485 if (Pred == ICmpInst::ICMP_SLT) 02486 return ConstantInt::getTrue(RHSC->getContext()); 02487 if (Pred == ICmpInst::ICMP_SGE) 02488 return ConstantInt::getFalse(RHSC->getContext()); 02489 if (Pred == ICmpInst::ICMP_EQ) 02490 return ConstantInt::getFalse(RHSC->getContext()); 02491 if (Pred == ICmpInst::ICMP_NE) 02492 return ConstantInt::getTrue(RHSC->getContext()); 02493 } 02494 if (RHSC->getValue().isNonNegative()) { 02495 if (Pred == ICmpInst::ICMP_SLE) 02496 return ConstantInt::getTrue(RHSC->getContext()); 02497 if (Pred == ICmpInst::ICMP_SGT) 02498 return ConstantInt::getFalse(RHSC->getContext()); 02499 } 02500 } 02501 } 02502 02503 // icmp pred (urem X, Y), Y 02504 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 02505 bool KnownNonNegative, KnownNegative; 02506 switch (Pred) { 02507 default: 02508 break; 02509 case ICmpInst::ICMP_SGT: 02510 case ICmpInst::ICMP_SGE: 02511 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 02512 0, Q.AT, Q.CxtI, Q.DT); 02513 if (!KnownNonNegative) 02514 break; 02515 // fall-through 02516 case ICmpInst::ICMP_EQ: 02517 case ICmpInst::ICMP_UGT: 02518 case ICmpInst::ICMP_UGE: 02519 return getFalse(ITy); 02520 case ICmpInst::ICMP_SLT: 02521 case ICmpInst::ICMP_SLE: 02522 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 02523 0, Q.AT, Q.CxtI, Q.DT); 02524 if (!KnownNonNegative) 02525 break; 02526 // fall-through 02527 case ICmpInst::ICMP_NE: 02528 case ICmpInst::ICMP_ULT: 02529 case ICmpInst::ICMP_ULE: 02530 return getTrue(ITy); 02531 } 02532 } 02533 02534 // icmp pred X, (urem Y, X) 02535 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 02536 bool KnownNonNegative, KnownNegative; 02537 switch (Pred) { 02538 default: 02539 break; 02540 case ICmpInst::ICMP_SGT: 02541 case ICmpInst::ICMP_SGE: 02542 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 02543 0, Q.AT, Q.CxtI, Q.DT); 02544 if (!KnownNonNegative) 02545 break; 02546 // fall-through 02547 case ICmpInst::ICMP_NE: 02548 case ICmpInst::ICMP_UGT: 02549 case ICmpInst::ICMP_UGE: 02550 return getTrue(ITy); 02551 case ICmpInst::ICMP_SLT: 02552 case ICmpInst::ICMP_SLE: 02553 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 02554 0, Q.AT, Q.CxtI, Q.DT); 02555 if (!KnownNonNegative) 02556 break; 02557 // fall-through 02558 case ICmpInst::ICMP_EQ: 02559 case ICmpInst::ICMP_ULT: 02560 case ICmpInst::ICMP_ULE: 02561 return getFalse(ITy); 02562 } 02563 } 02564 02565 // x udiv y <=u x. 
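// For example:
//   icmp ugt (udiv i32 %x, %y), %x  -->  false
//   icmp ule (udiv i32 %x, %y), %x  -->  true
// since an unsigned division never makes the dividend larger.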
02566 if (LBO && match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) { 02567 // icmp pred (X /u Y), X 02568 if (Pred == ICmpInst::ICMP_UGT) 02569 return getFalse(ITy); 02570 if (Pred == ICmpInst::ICMP_ULE) 02571 return getTrue(ITy); 02572 } 02573 02574 // handle: 02575 // CI2 << X == CI 02576 // CI2 << X != CI 02577 // 02578 // where CI2 is a power of 2 and CI isn't 02579 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 02580 const APInt *CI2Val, *CIVal = &CI->getValue(); 02581 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 02582 CI2Val->isPowerOf2()) { 02583 if (!CIVal->isPowerOf2()) { 02584 // CI2 << X can equal zero in some circumstances, 02585 // this simplification is unsafe if CI is zero. 02586 // 02587 // We know it is safe if: 02588 // - The shift is nsw, we can't shift out the one bit. 02589 // - The shift is nuw, we can't shift out the one bit. 02590 // - CI2 is one 02591 // - CI isn't zero 02592 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() || 02593 *CI2Val == 1 || !CI->isZero()) { 02594 if (Pred == ICmpInst::ICMP_EQ) 02595 return ConstantInt::getFalse(RHS->getContext()); 02596 if (Pred == ICmpInst::ICMP_NE) 02597 return ConstantInt::getTrue(RHS->getContext()); 02598 } 02599 } 02600 if (CIVal->isSignBit() && *CI2Val == 1) { 02601 if (Pred == ICmpInst::ICMP_UGT) 02602 return ConstantInt::getFalse(RHS->getContext()); 02603 if (Pred == ICmpInst::ICMP_ULE) 02604 return ConstantInt::getTrue(RHS->getContext()); 02605 } 02606 } 02607 } 02608 02609 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 02610 LBO->getOperand(1) == RBO->getOperand(1)) { 02611 switch (LBO->getOpcode()) { 02612 default: break; 02613 case Instruction::UDiv: 02614 case Instruction::LShr: 02615 if (ICmpInst::isSigned(Pred)) 02616 break; 02617 // fall-through 02618 case Instruction::SDiv: 02619 case Instruction::AShr: 02620 if (!LBO->isExact() || !RBO->isExact()) 02621 break; 02622 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 02623 RBO->getOperand(0), Q, MaxRecurse-1)) 02624 return V; 02625 break; 02626 case Instruction::Shl: { 02627 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap(); 02628 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap(); 02629 if (!NUW && !NSW) 02630 break; 02631 if (!NSW && ICmpInst::isSigned(Pred)) 02632 break; 02633 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 02634 RBO->getOperand(0), Q, MaxRecurse-1)) 02635 return V; 02636 break; 02637 } 02638 } 02639 } 02640 02641 // Simplify comparisons involving max/min. 02642 Value *A, *B; 02643 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 02644 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 02645 02646 // Signed variants on "max(a,b)>=a -> true". 02647 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 02648 if (A != RHS) std::swap(A, B); // smax(A, B) pred A. 02649 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 02650 // We analyze this as smax(A, B) pred A. 02651 P = Pred; 02652 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 02653 (A == LHS || B == LHS)) { 02654 if (A != LHS) std::swap(A, B); // A pred smax(A, B). 02655 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 02656 // We analyze this as smax(A, B) swapped-pred A. 02657 P = CmpInst::getSwappedPredicate(Pred); 02658 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 02659 (A == RHS || B == RHS)) { 02660 if (A != RHS) std::swap(A, B); // smin(A, B) pred A. 
02661 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 02662 // We analyze this as smax(-A, -B) swapped-pred -A. 02663 // Note that we do not need to actually form -A or -B thanks to EqP. 02664 P = CmpInst::getSwappedPredicate(Pred); 02665 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 02666 (A == LHS || B == LHS)) { 02667 if (A != LHS) std::swap(A, B); // A pred smin(A, B). 02668 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 02669 // We analyze this as smax(-A, -B) pred -A. 02670 // Note that we do not need to actually form -A or -B thanks to EqP. 02671 P = Pred; 02672 } 02673 if (P != CmpInst::BAD_ICMP_PREDICATE) { 02674 // Cases correspond to "max(A, B) p A". 02675 switch (P) { 02676 default: 02677 break; 02678 case CmpInst::ICMP_EQ: 02679 case CmpInst::ICMP_SLE: 02680 // Equivalent to "A EqP B". This may be the same as the condition tested 02681 // in the max/min; if so, we can just return that. 02682 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 02683 return V; 02684 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 02685 return V; 02686 // Otherwise, see if "A EqP B" simplifies. 02687 if (MaxRecurse) 02688 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse-1)) 02689 return V; 02690 break; 02691 case CmpInst::ICMP_NE: 02692 case CmpInst::ICMP_SGT: { 02693 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 02694 // Equivalent to "A InvEqP B". This may be the same as the condition 02695 // tested in the max/min; if so, we can just return that. 02696 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 02697 return V; 02698 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 02699 return V; 02700 // Otherwise, see if "A InvEqP B" simplifies. 02701 if (MaxRecurse) 02702 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse-1)) 02703 return V; 02704 break; 02705 } 02706 case CmpInst::ICMP_SGE: 02707 // Always true. 02708 return getTrue(ITy); 02709 case CmpInst::ICMP_SLT: 02710 // Always false. 02711 return getFalse(ITy); 02712 } 02713 } 02714 02715 // Unsigned variants on "max(a,b)>=a -> true". 02716 P = CmpInst::BAD_ICMP_PREDICATE; 02717 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 02718 if (A != RHS) std::swap(A, B); // umax(A, B) pred A. 02719 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 02720 // We analyze this as umax(A, B) pred A. 02721 P = Pred; 02722 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 02723 (A == LHS || B == LHS)) { 02724 if (A != LHS) std::swap(A, B); // A pred umax(A, B). 02725 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 02726 // We analyze this as umax(A, B) swapped-pred A. 02727 P = CmpInst::getSwappedPredicate(Pred); 02728 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 02729 (A == RHS || B == RHS)) { 02730 if (A != RHS) std::swap(A, B); // umin(A, B) pred A. 02731 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 02732 // We analyze this as umax(-A, -B) swapped-pred -A. 02733 // Note that we do not need to actually form -A or -B thanks to EqP. 02734 P = CmpInst::getSwappedPredicate(Pred); 02735 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 02736 (A == LHS || B == LHS)) { 02737 if (A != LHS) std::swap(A, B); // A pred umin(A, B). 02738 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 02739 // We analyze this as umax(-A, -B) pred -A. 02740 // Note that we do not need to actually form -A or -B thanks to EqP. 
02741 P = Pred; 02742 } 02743 if (P != CmpInst::BAD_ICMP_PREDICATE) { 02744 // Cases correspond to "max(A, B) p A". 02745 switch (P) { 02746 default: 02747 break; 02748 case CmpInst::ICMP_EQ: 02749 case CmpInst::ICMP_ULE: 02750 // Equivalent to "A EqP B". This may be the same as the condition tested 02751 // in the max/min; if so, we can just return that. 02752 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 02753 return V; 02754 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 02755 return V; 02756 // Otherwise, see if "A EqP B" simplifies. 02757 if (MaxRecurse) 02758 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse-1)) 02759 return V; 02760 break; 02761 case CmpInst::ICMP_NE: 02762 case CmpInst::ICMP_UGT: { 02763 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 02764 // Equivalent to "A InvEqP B". This may be the same as the condition 02765 // tested in the max/min; if so, we can just return that. 02766 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 02767 return V; 02768 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 02769 return V; 02770 // Otherwise, see if "A InvEqP B" simplifies. 02771 if (MaxRecurse) 02772 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse-1)) 02773 return V; 02774 break; 02775 } 02776 case CmpInst::ICMP_UGE: 02777 // Always true. 02778 return getTrue(ITy); 02779 case CmpInst::ICMP_ULT: 02780 // Always false. 02781 return getFalse(ITy); 02782 } 02783 } 02784 02785 // Variants on "max(x,y) >= min(x,z)". 02786 Value *C, *D; 02787 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 02788 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 02789 (A == C || A == D || B == C || B == D)) { 02790 // max(x, ?) pred min(x, ?). 02791 if (Pred == CmpInst::ICMP_SGE) 02792 // Always true. 02793 return getTrue(ITy); 02794 if (Pred == CmpInst::ICMP_SLT) 02795 // Always false. 02796 return getFalse(ITy); 02797 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 02798 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 02799 (A == C || A == D || B == C || B == D)) { 02800 // min(x, ?) pred max(x, ?). 02801 if (Pred == CmpInst::ICMP_SLE) 02802 // Always true. 02803 return getTrue(ITy); 02804 if (Pred == CmpInst::ICMP_SGT) 02805 // Always false. 02806 return getFalse(ITy); 02807 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 02808 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 02809 (A == C || A == D || B == C || B == D)) { 02810 // max(x, ?) pred min(x, ?). 02811 if (Pred == CmpInst::ICMP_UGE) 02812 // Always true. 02813 return getTrue(ITy); 02814 if (Pred == CmpInst::ICMP_ULT) 02815 // Always false. 02816 return getFalse(ITy); 02817 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 02818 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 02819 (A == C || A == D || B == C || B == D)) { 02820 // min(x, ?) pred max(x, ?). 02821 if (Pred == CmpInst::ICMP_ULE) 02822 // Always true. 02823 return getTrue(ITy); 02824 if (Pred == CmpInst::ICMP_UGT) 02825 // Always false. 02826 return getFalse(ITy); 02827 } 02828 02829 // Simplify comparisons of related pointers using a powerful, recursive 02830 // GEP-walk when we have target data available.. 
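// For instance, icmp eq i32* %p, null folds to false here when %p is known to
// be non-null, and an equality between the addresses of two distinct allocas
// (or an alloca and a global) is typically folded to false as well, under the
// conditions spelled out in computePointerICmp above.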
02831 if (LHS->getType()->isPointerTy()) 02832 if (Constant *C = computePointerICmp(Q.DL, Q.TLI, Pred, LHS, RHS)) 02833 return C; 02834 02835 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) { 02836 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) { 02837 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() && 02838 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() && 02839 (ICmpInst::isEquality(Pred) || 02840 (GLHS->isInBounds() && GRHS->isInBounds() && 02841 Pred == ICmpInst::getSignedPredicate(Pred)))) { 02842 // The bases are equal and the indices are constant. Build a constant 02843 // expression GEP with the same indices and a null base pointer to see 02844 // what constant folding can make out of it. 02845 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType()); 02846 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end()); 02847 Constant *NewLHS = ConstantExpr::getGetElementPtr(Null, IndicesLHS); 02848 02849 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end()); 02850 Constant *NewRHS = ConstantExpr::getGetElementPtr(Null, IndicesRHS); 02851 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS); 02852 } 02853 } 02854 } 02855 02856 // If the comparison is with the result of a select instruction, check whether 02857 // comparing with either branch of the select always yields the same value. 02858 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 02859 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 02860 return V; 02861 02862 // If the comparison is with the result of a phi instruction, check whether 02863 // doing the compare with each incoming phi value yields a common result. 02864 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 02865 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 02866 return V; 02867 02868 return nullptr; 02869 } 02870 02871 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 02872 const DataLayout *DL, 02873 const TargetLibraryInfo *TLI, 02874 const DominatorTree *DT, 02875 AssumptionTracker *AT, 02876 Instruction *CxtI) { 02877 return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT, AT, CxtI), 02878 RecursionLimit); 02879 } 02880 02881 /// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can 02882 /// fold the result. If not, this returns null. 02883 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 02884 const Query &Q, unsigned MaxRecurse) { 02885 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 02886 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 02887 02888 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 02889 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 02890 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 02891 02892 // If we have a constant, make sure it is on the RHS. 02893 std::swap(LHS, RHS); 02894 Pred = CmpInst::getSwappedPredicate(Pred); 02895 } 02896 02897 // Fold trivial predicates. 02898 if (Pred == FCmpInst::FCMP_FALSE) 02899 return ConstantInt::get(GetCompareTy(LHS), 0); 02900 if (Pred == FCmpInst::FCMP_TRUE) 02901 return ConstantInt::get(GetCompareTy(LHS), 1); 02902 02903 if (isa<UndefValue>(RHS)) // fcmp pred X, undef -> undef 02904 return UndefValue::get(GetCompareTy(LHS)); 02905 02906 // fcmp x,x -> true/false. Not all compares are foldable. 
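// For example, fcmp ueq %x, %x folds to true and fcmp olt %x, %x folds to
// false, but fcmp oeq %x, %x is left alone: if %x is a NaN the ordered
// comparison is false even though the operands are identical.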
02907 if (LHS == RHS) { 02908 if (CmpInst::isTrueWhenEqual(Pred)) 02909 return ConstantInt::get(GetCompareTy(LHS), 1); 02910 if (CmpInst::isFalseWhenEqual(Pred)) 02911 return ConstantInt::get(GetCompareTy(LHS), 0); 02912 } 02913 02914 // Handle fcmp with constant RHS 02915 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 02916 // If the constant is a nan, see if we can fold the comparison based on it. 02917 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) { 02918 if (CFP->getValueAPF().isNaN()) { 02919 if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo" 02920 return ConstantInt::getFalse(CFP->getContext()); 02921 assert(FCmpInst::isUnordered(Pred) && 02922 "Comparison must be either ordered or unordered!"); 02923 // True if unordered. 02924 return ConstantInt::getTrue(CFP->getContext()); 02925 } 02926 // Check whether the constant is an infinity. 02927 if (CFP->getValueAPF().isInfinity()) { 02928 if (CFP->getValueAPF().isNegative()) { 02929 switch (Pred) { 02930 case FCmpInst::FCMP_OLT: 02931 // No value is ordered and less than negative infinity. 02932 return ConstantInt::getFalse(CFP->getContext()); 02933 case FCmpInst::FCMP_UGE: 02934 // All values are unordered with or at least negative infinity. 02935 return ConstantInt::getTrue(CFP->getContext()); 02936 default: 02937 break; 02938 } 02939 } else { 02940 switch (Pred) { 02941 case FCmpInst::FCMP_OGT: 02942 // No value is ordered and greater than infinity. 02943 return ConstantInt::getFalse(CFP->getContext()); 02944 case FCmpInst::FCMP_ULE: 02945 // All values are unordered with and at most infinity. 02946 return ConstantInt::getTrue(CFP->getContext()); 02947 default: 02948 break; 02949 } 02950 } 02951 } 02952 } 02953 } 02954 02955 // If the comparison is with the result of a select instruction, check whether 02956 // comparing with either branch of the select always yields the same value. 02957 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 02958 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 02959 return V; 02960 02961 // If the comparison is with the result of a phi instruction, check whether 02962 // doing the compare with each incoming phi value yields a common result. 02963 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 02964 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 02965 return V; 02966 02967 return nullptr; 02968 } 02969 02970 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 02971 const DataLayout *DL, 02972 const TargetLibraryInfo *TLI, 02973 const DominatorTree *DT, 02974 AssumptionTracker *AT, 02975 const Instruction *CxtI) { 02976 return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT, AT, CxtI), 02977 RecursionLimit); 02978 } 02979 02980 /// SimplifySelectInst - Given operands for a SelectInst, see if we can fold 02981 /// the result. If not, this returns null. 
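// Some of the cases handled below, illustrated:
//   select i1 true,  i32 %a, i32 %b  -->  %a
//   select i1 false, i32 %a, i32 %b  -->  %b
//   select i1 %c,    i32 %a, i32 %a  -->  %a
//   select i1 %c,    i32 %a, undef   -->  %a
// A caller would typically reach this through the exported wrapper below,
// e.g. (illustrative names, analyses may be null):
//   if (Value *V = SimplifySelectInst(Cond, TVal, FVal, DL, TLI, DT, AT, &I))
//     I.replaceAllUsesWith(V);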
static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
                                 Value *FalseVal, const Query &Q,
                                 unsigned MaxRecurse) {
  // select true, X, Y  -> X
  // select false, X, Y -> Y
  if (Constant *CB = dyn_cast<Constant>(CondVal)) {
    if (CB->isAllOnesValue())
      return TrueVal;
    if (CB->isNullValue())
      return FalseVal;
  }

  // select C, X, X -> X
  if (TrueVal == FalseVal)
    return TrueVal;

  if (isa<UndefValue>(CondVal)) {  // select undef, X, Y -> X or Y
    if (isa<Constant>(TrueVal))
      return TrueVal;
    return FalseVal;
  }
  if (isa<UndefValue>(TrueVal))    // select C, undef, X -> X
    return FalseVal;
  if (isa<UndefValue>(FalseVal))   // select C, X, undef -> X
    return TrueVal;

  return nullptr;
}

Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                                const DataLayout *DL,
                                const TargetLibraryInfo *TLI,
                                const DominatorTree *DT,
                                AssumptionTracker *AT,
                                const Instruction *CxtI) {
  return ::SimplifySelectInst(Cond, TrueVal, FalseVal,
                              Query (DL, TLI, DT, AT, CxtI), RecursionLimit);
}

/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
  // The type of the GEP pointer operand.
  PointerType *PtrTy = cast<PointerType>(Ops[0]->getType()->getScalarType());
  unsigned AS = PtrTy->getAddressSpace();

  // getelementptr P -> P.
  if (Ops.size() == 1)
    return Ops[0];

  // Compute the (pointer) type returned by the GEP instruction.
  Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, Ops.slice(1));
  Type *GEPTy = PointerType::get(LastType, AS);
  if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
    GEPTy = VectorType::get(GEPTy, VT->getNumElements());

  if (isa<UndefValue>(Ops[0]))
    return UndefValue::get(GEPTy);

  if (Ops.size() == 2) {
    // getelementptr P, 0 -> P.
    if (match(Ops[1], m_Zero()))
      return Ops[0];

    Type *Ty = PtrTy->getElementType();
    if (Q.DL && Ty->isSized()) {
      Value *P;
      uint64_t C;
      uint64_t TyAllocSize = Q.DL->getTypeAllocSize(Ty);
      // getelementptr P, N -> P if P points to a type of zero size.
      if (TyAllocSize == 0)
        return Ops[0];

      // The following transforms are only safe if the ptrtoint cast
      // doesn't truncate the pointers.
      if (Ops[1]->getType()->getScalarSizeInBits() ==
          Q.DL->getPointerSizeInBits(AS)) {
        auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
          if (match(P, m_Zero()))
            return Constant::getNullValue(GEPTy);
          Value *Temp;
          if (match(P, m_PtrToInt(m_Value(Temp))))
            if (Temp->getType() == GEPTy)
              return Temp;
          return nullptr;
        };

        // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
        if (TyAllocSize == 1 &&
            match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
          if (Value *R = PtrToIntOrZero(P))
            return R;

        // getelementptr V, (ashr (sub P, V), C) -> Q
        // if P points to a type of size 1 << C.
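        // (Illustrative note: C pointer subtraction is typically lowered to a
        // "sub" of two ptrtoints followed by an exact division by the element
        // size, which later transforms may turn into an "ashr" when that size
        // is a power of two; those are the patterns matched below and in the
        // "sdiv" case that follows.)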
        if (match(Ops[1],
                  m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
                         m_ConstantInt(C))) &&
            TyAllocSize == 1ULL << C)
          if (Value *R = PtrToIntOrZero(P))
            return R;

        // getelementptr V, (sdiv (sub P, V), C) -> Q
        // if P points to a type of size C.
        if (match(Ops[1],
                  m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
                         m_SpecificInt(TyAllocSize))))
          if (Value *R = PtrToIntOrZero(P))
            return R;
      }
    }
  }

  // Check to see if this is constant foldable.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (!isa<Constant>(Ops[i]))
      return nullptr;

  return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}

Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
                             const TargetLibraryInfo *TLI,
                             const DominatorTree *DT, AssumptionTracker *AT,
                             const Instruction *CxtI) {
  return ::SimplifyGEPInst(Ops, Query (DL, TLI, DT, AT, CxtI), RecursionLimit);
}

/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
                                      ArrayRef<unsigned> Idxs, const Query &Q,
                                      unsigned) {
  if (Constant *CAgg = dyn_cast<Constant>(Agg))
    if (Constant *CVal = dyn_cast<Constant>(Val))
      return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);

  // insertvalue x, undef, n -> x
  if (match(Val, m_Undef()))
    return Agg;

  // insertvalue x, (extractvalue y, n), n
  if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
    if (EV->getAggregateOperand()->getType() == Agg->getType() &&
        EV->getIndices() == Idxs) {
      // insertvalue undef, (extractvalue y, n), n -> y
      if (match(Agg, m_Undef()))
        return EV->getAggregateOperand();

      // insertvalue y, (extractvalue y, n), n -> y
      if (Agg == EV->getAggregateOperand())
        return Agg;
    }

  return nullptr;
}

Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
                                     ArrayRef<unsigned> Idxs,
                                     const DataLayout *DL,
                                     const TargetLibraryInfo *TLI,
                                     const DominatorTree *DT,
                                     AssumptionTracker *AT,
                                     const Instruction *CxtI) {
  return ::SimplifyInsertValueInst(Agg, Val, Idxs,
                                   Query (DL, TLI, DT, AT, CxtI),
                                   RecursionLimit);
}

/// SimplifyPHINode - See if we can fold the given phi. If not, returns null.
static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
  // If all of the PHI's incoming values are the same then replace the PHI node
  // with the common value.
  Value *CommonValue = nullptr;
  bool HasUndefInput = false;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = PN->getIncomingValue(i);
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PN) continue;
    if (isa<UndefValue>(Incoming)) {
      // Remember that we saw an undef value, but otherwise ignore them.
      HasUndefInput = true;
      continue;
    }
    if (CommonValue && Incoming != CommonValue)
      return nullptr;  // Not the same, bail out.
    CommonValue = Incoming;
  }

  // If CommonValue is null then all of the incoming values were either undef or
  // equal to the phi node itself.
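  // In that case the phi can only ever produce an undef value, so folding it
  // to undef is safe; e.g. (illustrative)
  // "%p = phi i32 [ undef, %a ], [ %p, %b ]" folds to undef.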
  if (!CommonValue)
    return UndefValue::get(PN->getType());

  // If we have a PHI node like phi(X, undef, X), where X is defined by some
  // instruction, we cannot return X as the result of the PHI node unless it
  // dominates the PHI block.
  if (HasUndefInput)
    return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;

  return CommonValue;
}

static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
  if (Constant *C = dyn_cast<Constant>(Op))
    return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);

  return nullptr;
}

Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT,
                               AssumptionTracker *AT,
                               const Instruction *CxtI) {
  return ::SimplifyTruncInst(Op, Ty, Query (DL, TLI, DT, AT, CxtI),
                             RecursionLimit);
}

//=== Helper functions for higher up the class hierarchy.

/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const Query &Q, unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::Add:
    return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
                           Q, MaxRecurse);
  case Instruction::FAdd:
    return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);

  case Instruction::Sub:
    return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
                           Q, MaxRecurse);
  case Instruction::FSub:
    return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);

  case Instruction::Mul:  return SimplifyMulInst (LHS, RHS, Q, MaxRecurse);
  case Instruction::FMul:
    return SimplifyFMulInst (LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::SRem: return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::URem: return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::FRem: return SimplifyFRemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Shl:
    return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
                           Q, MaxRecurse);
  case Instruction::LShr:
    return SimplifyLShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
  case Instruction::AShr:
    return SimplifyAShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
  case Instruction::And: return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Or:  return SimplifyOrInst (LHS, RHS, Q, MaxRecurse);
  case Instruction::Xor: return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
  default:
    if (Constant *CLHS = dyn_cast<Constant>(LHS))
      if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
        Constant *COps[] = {CLHS, CRHS};
        return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.DL,
                                        Q.TLI);
      }

    // If the operation is associative, try some generic simplifications.
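    // (Roughly: SimplifyAssociativeBinOp reassociates forms such as
    // "(A op B) op C" into "A op (B op C)" when one of the inner pairings
    // simplifies -- an illustrative summary; see its definition earlier in
    // this file.)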
    if (Instruction::isAssociative(Opcode))
      if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, Q, MaxRecurse))
        return V;

    // If the operation is with the result of a select instruction check whether
    // operating on either branch of the select always yields the same value.
    if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
      if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, Q, MaxRecurse))
        return V;

    // If the operation is with the result of a phi instruction, check whether
    // operating on all incoming values of the phi always yields the same value.
    if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
      if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
        return V;

    return nullptr;
  }
}

Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           const DataLayout *DL, const TargetLibraryInfo *TLI,
                           const DominatorTree *DT, AssumptionTracker *AT,
                           const Instruction *CxtI) {
  return ::SimplifyBinOp(Opcode, LHS, RHS, Query (DL, TLI, DT, AT, CxtI),
                         RecursionLimit);
}

/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                              const Query &Q, unsigned MaxRecurse) {
  if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
    return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
  return SimplifyFCmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
}

Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                             const DataLayout *DL, const TargetLibraryInfo *TLI,
                             const DominatorTree *DT, AssumptionTracker *AT,
                             const Instruction *CxtI) {
  return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT, AT, CxtI),
                           RecursionLimit);
}

static bool IsIdempotent(Intrinsic::ID ID) {
  switch (ID) {
  default: return false;

  // Unary idempotent: f(f(x)) = f(x)
  case Intrinsic::fabs:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
    return true;
  }
}

template <typename IterTy>
static Value *SimplifyIntrinsic(Intrinsic::ID IID, IterTy ArgBegin,
                                IterTy ArgEnd, const Query &Q,
                                unsigned MaxRecurse) {
  // Perform idempotent optimizations
  if (!IsIdempotent(IID))
    return nullptr;

  // Unary Ops
  if (std::distance(ArgBegin, ArgEnd) == 1)
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin))
      if (II->getIntrinsicID() == IID)
        return II;

  return nullptr;
}

template <typename IterTy>
static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
                           const Query &Q, unsigned MaxRecurse) {
  Type *Ty = V->getType();
  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
    Ty = PTy->getElementType();
  FunctionType *FTy = cast<FunctionType>(Ty);

  // call undef -> undef
  if (isa<UndefValue>(V))
    return UndefValue::get(FTy->getReturnType());

  Function *F = dyn_cast<Function>(V);
  if (!F)
    return nullptr;

  if (unsigned IID = F->getIntrinsicID())
    if (Value *Ret =
            SimplifyIntrinsic((Intrinsic::ID) IID, ArgBegin, ArgEnd, Q,
                              MaxRecurse))
      return Ret;

  if (!canConstantFoldCallTo(F))
    return nullptr;

  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(ArgEnd - ArgBegin);
  for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      return nullptr;
    ConstantArgs.push_back(C);
  }

  return ConstantFoldCall(F, ConstantArgs, Q.TLI);
}

Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
                          User::op_iterator ArgEnd, const DataLayout *DL,
                          const TargetLibraryInfo *TLI,
                          const DominatorTree *DT, AssumptionTracker *AT,
                          const Instruction *CxtI) {
  return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AT, CxtI),
                        RecursionLimit);
}

Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
                          const DataLayout *DL, const TargetLibraryInfo *TLI,
                          const DominatorTree *DT, AssumptionTracker *AT,
                          const Instruction *CxtI) {
  return ::SimplifyCall(V, Args.begin(), Args.end(),
                        Query(DL, TLI, DT, AT, CxtI), RecursionLimit);
}

/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
                                 const TargetLibraryInfo *TLI,
                                 const DominatorTree *DT,
                                 AssumptionTracker *AT) {
  Value *Result;

  switch (I->getOpcode()) {
  default:
    Result = ConstantFoldInstruction(I, DL, TLI);
    break;
  case Instruction::FAdd:
    Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), DL, TLI, DT, AT, I);
    break;
  case Instruction::Add:
    Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
                             cast<BinaryOperator>(I)->hasNoSignedWrap(),
                             cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
                             DL, TLI, DT, AT, I);
    break;
  case Instruction::FSub:
    Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), DL, TLI, DT, AT, I);
    break;
  case Instruction::Sub:
    Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
                             cast<BinaryOperator>(I)->hasNoSignedWrap(),
                             cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
                             DL, TLI, DT, AT, I);
    break;
  case Instruction::FMul:
    Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), DL, TLI, DT, AT, I);
    break;
  case Instruction::Mul:
    Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1),
                             DL, TLI, DT, AT, I);
    break;
  case Instruction::SDiv:
    Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::UDiv:
    Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::FDiv:
    Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::SRem:
    Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::URem:
    Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::FRem:
    Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::Shl:
    Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
                             cast<BinaryOperator>(I)->hasNoSignedWrap(),
                             cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
                             DL, TLI, DT, AT, I);
    break;
  case Instruction::LShr:
    Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
                              cast<BinaryOperator>(I)->isExact(),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::AShr:
    Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
                              cast<BinaryOperator>(I)->isExact(),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::And:
    Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1),
                             DL, TLI, DT, AT, I);
    break;
  case Instruction::Or:
    Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
                            AT, I);
    break;
  case Instruction::Xor:
    Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1),
                             DL, TLI, DT, AT, I);
    break;
  case Instruction::ICmp:
    Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
                              I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::FCmp:
    Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
                              I->getOperand(0), I->getOperand(1),
                              DL, TLI, DT, AT, I);
    break;
  case Instruction::Select:
    Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
                                I->getOperand(2), DL, TLI, DT, AT, I);
    break;
  case Instruction::GetElementPtr: {
    SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
    Result = SimplifyGEPInst(Ops, DL, TLI, DT, AT, I);
    break;
  }
  case Instruction::InsertValue: {
    InsertValueInst *IV = cast<InsertValueInst>(I);
    Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
                                     IV->getInsertedValueOperand(),
                                     IV->getIndices(), DL, TLI, DT, AT, I);
    break;
  }
  case Instruction::PHI:
    Result = SimplifyPHINode(cast<PHINode>(I), Query (DL, TLI, DT, AT, I));
    break;
  case Instruction::Call: {
    CallSite CS(cast<CallInst>(I));
    Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(),
                          DL, TLI, DT, AT, I);
    break;
  }
  case Instruction::Trunc:
    Result = SimplifyTruncInst(I->getOperand(0), I->getType(), DL, TLI, DT,
                               AT, I);
    break;
  }

  /// If called on unreachable code, the above logic may report that the
  /// instruction simplified to itself. Make life easier for users by
  /// detecting that case here, returning a safe value instead.
  return Result == I ? UndefValue::get(I->getType()) : Result;
}

/// \brief Implementation of recursive simplification through an instruction's
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
/// InstructionSimplify.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
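///
/// Illustrative use of the public wrappers below (a minimal sketch; 'I' and
/// the analyses 'DL', 'TLI', 'DT' and 'AT' are whatever the caller has on
/// hand):
///
///   if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AT))
///     replaceAndRecursivelySimplify(I, V, DL, TLI, DT, AT);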
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
                                              const DataLayout *DL,
                                              const TargetLibraryInfo *TLI,
                                              const DominatorTree *DT,
                                              AssumptionTracker *AT) {
  bool Simplified = false;
  SmallSetVector<Instruction *, 8> Worklist;

  // If we have an explicit value to collapse to, do that round of the
  // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }

  // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = SimplifyInstruction(I, DL, TLI, DT, AT);
    if (!SimpleV)
      continue;

    Simplified = true;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking all
    // uses of To on the recursive step in most cases.
    for (User *U : I->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent())
      I->eraseFromParent();
  }
  return Simplified;
}

bool llvm::recursivelySimplifyInstruction(Instruction *I,
                                          const DataLayout *DL,
                                          const TargetLibraryInfo *TLI,
                                          const DominatorTree *DT,
                                          AssumptionTracker *AT) {
  return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT, AT);
}

bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
                                         const DataLayout *DL,
                                         const TargetLibraryInfo *TLI,
                                         const DominatorTree *DT,
                                         AssumptionTracker *AT) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT, AT);
}
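// Illustrative caller (not part of this file): a pass might drive
// SimplifyInstruction over a whole function roughly as follows, with 'F',
// 'DL', 'TLI', 'DT' and 'AT' supplied by the pass itself:
//
//   for (BasicBlock &BB : F)
//     for (Instruction &I : BB)
//       if (Value *V = SimplifyInstruction(&I, DL, TLI, DT, AT))
//         I.replaceAllUsesWith(V);
//
// A real pass would also queue the now-dead instruction for removal; see
// replaceAndRecursivelySimplify above for a worklist-driven variant.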