LLVM API Documentation
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
}

// Many of these functions have internal versions that take an assumption
// exclusion set. This is because of the potential for mutual recursion to
// cause computeKnownBits to repeatedly visit the same assume intrinsic. The
// classic case of this is assume(x = y), which will attempt to determine
// bits in x from bits in y, which will attempt to determine bits in y from
// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on.
typedef SmallPtrSet<const Value *, 8> ExclInvsSet;
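// Illustrative example (not part of the original source): the kind of IR that
// triggers the mutual recursion described above. Using the assume to refine
// the known bits of %x consults %y, and refining %y in turn consults %x; the
// exclusion set records the assume currently being used so this terminates.
// The value names are hypothetical and the syntax is approximate:
//
//   %cmp = icmp eq i32 %x, %y
//   call void @llvm.assume(i1 %cmp)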
namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  ExclInvsSet ExclInvs;
  AssumptionTracker *AT;
  const Instruction *CxtI;
  const DominatorTree *DT;

  Query(AssumptionTracker *AT = nullptr, const Instruction *CxtI = nullptr,
        const DominatorTree *DT = nullptr)
    : AT(AT), CxtI(CxtI), DT(DT) {}

  Query(const Query &Q, const Value *NewExcl)
    : ExclInvs(Q.ExclInvs), AT(Q.AT), CxtI(Q.CxtI), DT(Q.DT) {
    ExclInvs.insert(NewExcl);
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const DataLayout *TD, unsigned Depth,
                             const Query &Q);

void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout *TD, unsigned Depth,
                            AssumptionTracker *AT, const Instruction *CxtI,
                            const DominatorTree *DT) {
  ::computeKnownBits(V, KnownZero, KnownOne, TD, Depth,
                     Query(AT, safeCxtI(V, CxtI), DT));
}

static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                           const DataLayout *TD, unsigned Depth,
                           const Query &Q);

void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout *TD, unsigned Depth,
                          AssumptionTracker *AT, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, TD, Depth,
                   Query(AT, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                                  AssumptionTracker *AT,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(AT, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth,
                           const Query &Q);

bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth,
                          AssumptionTracker *AT, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, TD, Depth, Query(AT, safeCxtI(V, CxtI), DT));
}

static bool MaskedValueIsZero(Value *V, const APInt &Mask,
                              const DataLayout *TD, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             const DataLayout *TD, unsigned Depth,
                             AssumptionTracker *AT, const Instruction *CxtI,
                             const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, TD, Depth,
                             Query(AT, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(Value *V, const DataLayout *TD,
                                   unsigned Depth, const Query &Q);

unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
                                  unsigned Depth, AssumptionTracker *AT,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, TD, Depth, Query(AT, safeCxtI(V, CxtI), DT));
}
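// --- Illustrative usage (not part of the original ValueTracking.cpp) ---
// A minimal sketch of how a client might call the public wrappers declared
// above, here to check whether a value is known to be a multiple of four.
// The helper name is hypothetical; only the llvm::computeKnownBits signature
// is taken from the declarations above.
static bool exampleKnownMultipleOfFour(Value *V, const DataLayout *DL,
                                       AssumptionTracker *AT,
                                       const Instruction *CxtI,
                                       const DominatorTree *DT) {
  unsigned BitWidth = getBitWidth(V->getType(), DL);
  if (!BitWidth)
    return false;
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  llvm::computeKnownBits(V, KnownZero, KnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
  // V is a multiple of four when its two low bits are known to be zero.
  return KnownZero.countTrailingOnes() >= 2;
}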
static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   const DataLayout *TD, unsigned Depth,
                                   const Query &Q) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1, Q);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  APInt CarryIn(BitWidth, 0);
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn.setBit(0);
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero |= APInt::getSignBit(BitWidth);
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne |= APInt::getSignBit(BitWidth);
    }
  }
}
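// Illustrative worked example (not part of the original source), tracing the
// formulas above for a 4-bit add with CarryIn = 0 where both operands have
// their two low bits known to be zero:
//   LHSKnownZero = 0011, LHSKnownOne = 0000,
//   KnownZero2   = 0011, KnownOne2   = 0000.
// Then PossibleSumZero = ~0011 + ~0011 + 0 = 1000 (mod 2^4),
//      PossibleSumOne  = 0000,
//      CarryKnownZero  = ~(1000 ^ 0011 ^ 0011) = 0111,
// so Known = 0011 & 0011 & 0111 = 0011, giving KnownZero = 0011 and
// KnownOne = 0000: the sum of two multiples of four is itself known to be a
// multiple of four.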
static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                const DataLayout *TD, unsigned Depth,
                                const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, TD, Depth+1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, TD, Depth+1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, TD, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, TD, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}
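// Illustrative worked example (not part of the original source): if Op0 is
// known to be a multiple of 8 (three trailing zero bits) and Op1 a multiple
// of 2 (one trailing zero bit), the TrailZ computation above yields
// 3 + 1 = 4 trailing zero bits, i.e. the product is known to be a multiple
// of 16. This is the alignment-style reasoning the comment above refers to.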
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  // Use the high end of the ranges to find leading zeros.
  unsigned MinLeadingZeros = BitWidth;
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
    ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.isWrappedSet())
      MinLeadingZeros = 0; // -1 has no zeros
    unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
    MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
  }

  KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
}

static bool isEphemeralValueOf(Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V))
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    bool FoundNEUse = false;
    for (const User *I : V->users())
      if (!EphValues.count(I)) {
        FoundNEUse = true;
        break;
      }

    if (!FoundNEUse) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J) {
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
        }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}
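// Illustrative example (not part of the original source): in IR such as
//
//   %cmp = icmp ugt i32 %n, 7
//   call void @llvm.assume(i1 %cmp)
//
// %cmp (and any instruction feeding only the assume) is "ephemeral": it
// exists only to describe the assumption. isValidAssumeForContext below
// therefore refuses to use the assume when the context instruction is one of
// these ephemeral values, since that would prove the assume's own condition
// trivially true and cause its removal.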
static bool isValidAssumeForContext(Value *V, const Query &Q,
                                    const DataLayout *DL) {
  Instruction *Inv = cast<Instruction>(V);

  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Q.DT) {
    if (Q.DT->dominates(Inv, Q.CxtI)) {
      return true;
    } else if (Inv->getParent() == Q.CxtI->getParent()) {
      // The context comes first, but they're both in the same block. Make sure
      // there is nothing in between that might interrupt the control flow.
      for (BasicBlock::const_iterator I =
             std::next(BasicBlock::const_iterator(Q.CxtI)),
           IE(Inv); I != IE; ++I)
        if (!isSafeToSpeculativelyExecute(I, DL) &&
            !isAssumeLikeIntrinsic(I))
          return false;

      return !isEphemeralValueOf(Inv, Q.CxtI);
    }

    return false;
  }

  // When we don't have a DT, we do a limited search...
  if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) {
    return true;
  } else if (Inv->getParent() == Q.CxtI->getParent()) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (I == Q.CxtI)
        return true;

    // The context must come first...
    for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(Q.CxtI)),
         IE(Inv); I != IE; ++I)
      if (!isSafeToSpeculativelyExecute(I, DL) &&
          !isAssumeLikeIntrinsic(I))
        return false;

    return !isEphemeralValueOf(Inv, Q.CxtI);
  }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *I,
                                   const Instruction *CxtI,
                                   const DataLayout *DL,
                                   const DominatorTree *DT) {
  return ::isValidAssumeForContext(const_cast<Instruction*>(I),
                                   Query(nullptr, CxtI, DT), DL);
}

template<typename LHS, typename RHS>
inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
                        CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
                        BinaryOp_match<RHS, LHS, Instruction::And>>
m_c_And(const LHS &L, const RHS &R) {
  return m_CombineOr(m_And(L, R), m_And(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
                        BinaryOp_match<RHS, LHS, Instruction::Or>>
m_c_Or(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Or(L, R), m_Or(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
                        BinaryOp_match<RHS, LHS, Instruction::Xor>>
m_c_Xor(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
}

static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
                                       APInt &KnownOne,
                                       const DataLayout *DL,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
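  // Illustrative example (not from the original source): given IR like
  //
  //   %m = and i32 %v, 255
  //   %c = icmp eq i32 %m, 7
  //   call void @llvm.assume(i1 %c)
  //
  // the "assume(v & b = a)" pattern handled below lets us conclude that the
  // low 8 bits of %v are known to be 00000111: KnownZero gains 0xF8 and
  // KnownOne gains 0x07 in those positions.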
00483 if (!Q.AT || !Q.CxtI) 00484 return; 00485 00486 unsigned BitWidth = KnownZero.getBitWidth(); 00487 00488 Function *F = const_cast<Function*>(Q.CxtI->getParent()->getParent()); 00489 for (auto &CI : Q.AT->assumptions(F)) { 00490 CallInst *I = CI; 00491 if (Q.ExclInvs.count(I)) 00492 continue; 00493 00494 if (match(I, m_Intrinsic<Intrinsic::assume>(m_Specific(V))) && 00495 isValidAssumeForContext(I, Q, DL)) { 00496 assert(BitWidth == 1 && "assume operand is not i1?"); 00497 KnownZero.clearAllBits(); 00498 KnownOne.setAllBits(); 00499 return; 00500 } 00501 00502 Value *A, *B; 00503 auto m_V = m_CombineOr(m_Specific(V), 00504 m_CombineOr(m_PtrToInt(m_Specific(V)), 00505 m_BitCast(m_Specific(V)))); 00506 00507 CmpInst::Predicate Pred; 00508 ConstantInt *C; 00509 // assume(v = a) 00510 if (match(I, m_Intrinsic<Intrinsic::assume>( 00511 m_c_ICmp(Pred, m_V, m_Value(A)))) && 00512 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00513 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00514 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00515 KnownZero |= RHSKnownZero; 00516 KnownOne |= RHSKnownOne; 00517 // assume(v & b = a) 00518 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00519 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) && 00520 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00521 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00522 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00523 APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0); 00524 computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I)); 00525 00526 // For those bits in the mask that are known to be one, we can propagate 00527 // known bits from the RHS to V. 00528 KnownZero |= RHSKnownZero & MaskKnownOne; 00529 KnownOne |= RHSKnownOne & MaskKnownOne; 00530 // assume(~(v & b) = a) 00531 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00532 m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))), 00533 m_Value(A)))) && 00534 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00535 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00536 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00537 APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0); 00538 computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I)); 00539 00540 // For those bits in the mask that are known to be one, we can propagate 00541 // inverted known bits from the RHS to V. 00542 KnownZero |= RHSKnownOne & MaskKnownOne; 00543 KnownOne |= RHSKnownZero & MaskKnownOne; 00544 // assume(v | b = a) 00545 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00546 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) && 00547 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00548 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00549 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00550 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 00551 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 00552 00553 // For those bits in B that are known to be zero, we can propagate known 00554 // bits from the RHS to V. 
00555 KnownZero |= RHSKnownZero & BKnownZero; 00556 KnownOne |= RHSKnownOne & BKnownZero; 00557 // assume(~(v | b) = a) 00558 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00559 m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))), 00560 m_Value(A)))) && 00561 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00562 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00563 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00564 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 00565 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 00566 00567 // For those bits in B that are known to be zero, we can propagate 00568 // inverted known bits from the RHS to V. 00569 KnownZero |= RHSKnownOne & BKnownZero; 00570 KnownOne |= RHSKnownZero & BKnownZero; 00571 // assume(v ^ b = a) 00572 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00573 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) && 00574 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00575 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00576 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00577 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 00578 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 00579 00580 // For those bits in B that are known to be zero, we can propagate known 00581 // bits from the RHS to V. For those bits in B that are known to be one, 00582 // we can propagate inverted known bits from the RHS to V. 00583 KnownZero |= RHSKnownZero & BKnownZero; 00584 KnownOne |= RHSKnownOne & BKnownZero; 00585 KnownZero |= RHSKnownOne & BKnownOne; 00586 KnownOne |= RHSKnownZero & BKnownOne; 00587 // assume(~(v ^ b) = a) 00588 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00589 m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))), 00590 m_Value(A)))) && 00591 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00592 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00593 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00594 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 00595 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 00596 00597 // For those bits in B that are known to be zero, we can propagate 00598 // inverted known bits from the RHS to V. For those bits in B that are 00599 // known to be one, we can propagate known bits from the RHS to V. 00600 KnownZero |= RHSKnownOne & BKnownZero; 00601 KnownOne |= RHSKnownZero & BKnownZero; 00602 KnownZero |= RHSKnownZero & BKnownOne; 00603 KnownOne |= RHSKnownOne & BKnownOne; 00604 // assume(v << c = a) 00605 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00606 m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)), 00607 m_Value(A)))) && 00608 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00609 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00610 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00611 // For those bits in RHS that are known, we can propagate them to known 00612 // bits in V shifted to the right by C. 
00613 KnownZero |= RHSKnownZero.lshr(C->getZExtValue()); 00614 KnownOne |= RHSKnownOne.lshr(C->getZExtValue()); 00615 // assume(~(v << c) = a) 00616 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00617 m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))), 00618 m_Value(A)))) && 00619 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00620 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00621 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00622 // For those bits in RHS that are known, we can propagate them inverted 00623 // to known bits in V shifted to the right by C. 00624 KnownZero |= RHSKnownOne.lshr(C->getZExtValue()); 00625 KnownOne |= RHSKnownZero.lshr(C->getZExtValue()); 00626 // assume(v >> c = a) 00627 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00628 m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)), 00629 m_AShr(m_V, 00630 m_ConstantInt(C))), 00631 m_Value(A)))) && 00632 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00633 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00634 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00635 // For those bits in RHS that are known, we can propagate them to known 00636 // bits in V shifted to the right by C. 00637 KnownZero |= RHSKnownZero << C->getZExtValue(); 00638 KnownOne |= RHSKnownOne << C->getZExtValue(); 00639 // assume(~(v >> c) = a) 00640 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00641 m_c_ICmp(Pred, m_Not(m_CombineOr( 00642 m_LShr(m_V, m_ConstantInt(C)), 00643 m_AShr(m_V, m_ConstantInt(C)))), 00644 m_Value(A)))) && 00645 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) { 00646 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00647 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00648 // For those bits in RHS that are known, we can propagate them inverted 00649 // to known bits in V shifted to the right by C. 00650 KnownZero |= RHSKnownOne << C->getZExtValue(); 00651 KnownOne |= RHSKnownZero << C->getZExtValue(); 00652 // assume(v >=_s c) where c is non-negative 00653 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00654 m_ICmp(Pred, m_V, m_Value(A)))) && 00655 Pred == ICmpInst::ICMP_SGE && 00656 isValidAssumeForContext(I, Q, DL)) { 00657 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00658 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00659 00660 if (RHSKnownZero.isNegative()) { 00661 // We know that the sign bit is zero. 00662 KnownZero |= APInt::getSignBit(BitWidth); 00663 } 00664 // assume(v >_s c) where c is at least -1. 00665 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00666 m_ICmp(Pred, m_V, m_Value(A)))) && 00667 Pred == ICmpInst::ICMP_SGT && 00668 isValidAssumeForContext(I, Q, DL)) { 00669 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00670 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00671 00672 if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) { 00673 // We know that the sign bit is zero. 
00674 KnownZero |= APInt::getSignBit(BitWidth); 00675 } 00676 // assume(v <=_s c) where c is negative 00677 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00678 m_ICmp(Pred, m_V, m_Value(A)))) && 00679 Pred == ICmpInst::ICMP_SLE && 00680 isValidAssumeForContext(I, Q, DL)) { 00681 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00682 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00683 00684 if (RHSKnownOne.isNegative()) { 00685 // We know that the sign bit is one. 00686 KnownOne |= APInt::getSignBit(BitWidth); 00687 } 00688 // assume(v <_s c) where c is non-positive 00689 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00690 m_ICmp(Pred, m_V, m_Value(A)))) && 00691 Pred == ICmpInst::ICMP_SLT && 00692 isValidAssumeForContext(I, Q, DL)) { 00693 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00694 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00695 00696 if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) { 00697 // We know that the sign bit is one. 00698 KnownOne |= APInt::getSignBit(BitWidth); 00699 } 00700 // assume(v <=_u c) 00701 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00702 m_ICmp(Pred, m_V, m_Value(A)))) && 00703 Pred == ICmpInst::ICMP_ULE && 00704 isValidAssumeForContext(I, Q, DL)) { 00705 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00706 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00707 00708 // Whatever high bits in c are zero are known to be zero. 00709 KnownZero |= 00710 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()); 00711 // assume(v <_u c) 00712 } else if (match(I, m_Intrinsic<Intrinsic::assume>( 00713 m_ICmp(Pred, m_V, m_Value(A)))) && 00714 Pred == ICmpInst::ICMP_ULT && 00715 isValidAssumeForContext(I, Q, DL)) { 00716 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 00717 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 00718 00719 // Whatever high bits in c are zero are known to be zero (if c is a power 00720 // of 2, then one more). 00721 if (isKnownToBeAPowerOfTwo(A, false, Depth+1, Query(Q, I))) 00722 KnownZero |= 00723 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1); 00724 else 00725 KnownZero |= 00726 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()); 00727 } 00728 } 00729 } 00730 00731 /// Determine which bits of V are known to be either zero or one and return 00732 /// them in the KnownZero/KnownOne bit sets. 00733 /// 00734 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that 00735 /// we cannot optimize based on the assumption that it is zero without changing 00736 /// it to be an explicit zero. If we don't change it to zero, other code could 00737 /// optimized based on the contradictory assumption that it is non-zero. 00738 /// Because instcombine aggressively folds operations with undef args anyway, 00739 /// this won't lose us code quality. 00740 /// 00741 /// This function is defined on values with integer type, values with pointer 00742 /// type (but only if TD is non-null), and vectors of integers. In the case 00743 /// where V is a vector, known zero, and known one values are the 00744 /// same width as the vector element, and the bit is set only if it is true 00745 /// for all of the elements in the vector. 
00746 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, 00747 const DataLayout *TD, unsigned Depth, 00748 const Query &Q) { 00749 assert(V && "No Value?"); 00750 assert(Depth <= MaxDepth && "Limit Search Depth"); 00751 unsigned BitWidth = KnownZero.getBitWidth(); 00752 00753 assert((V->getType()->isIntOrIntVectorTy() || 00754 V->getType()->getScalarType()->isPointerTy()) && 00755 "Not integer or pointer type!"); 00756 assert((!TD || 00757 TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) && 00758 (!V->getType()->isIntOrIntVectorTy() || 00759 V->getType()->getScalarSizeInBits() == BitWidth) && 00760 KnownZero.getBitWidth() == BitWidth && 00761 KnownOne.getBitWidth() == BitWidth && 00762 "V, KnownOne and KnownZero should have same BitWidth"); 00763 00764 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 00765 // We know all of the bits for a constant! 00766 KnownOne = CI->getValue(); 00767 KnownZero = ~KnownOne; 00768 return; 00769 } 00770 // Null and aggregate-zero are all-zeros. 00771 if (isa<ConstantPointerNull>(V) || 00772 isa<ConstantAggregateZero>(V)) { 00773 KnownOne.clearAllBits(); 00774 KnownZero = APInt::getAllOnesValue(BitWidth); 00775 return; 00776 } 00777 // Handle a constant vector by taking the intersection of the known bits of 00778 // each element. There is no real need to handle ConstantVector here, because 00779 // we don't handle undef in any particularly useful way. 00780 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) { 00781 // We know that CDS must be a vector of integers. Take the intersection of 00782 // each element. 00783 KnownZero.setAllBits(); KnownOne.setAllBits(); 00784 APInt Elt(KnownZero.getBitWidth(), 0); 00785 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 00786 Elt = CDS->getElementAsInteger(i); 00787 KnownZero &= ~Elt; 00788 KnownOne &= Elt; 00789 } 00790 return; 00791 } 00792 00793 // The address of an aligned GlobalValue has trailing zeros. 00794 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 00795 unsigned Align = GV->getAlignment(); 00796 if (Align == 0 && TD) { 00797 if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) { 00798 Type *ObjectType = GVar->getType()->getElementType(); 00799 if (ObjectType->isSized()) { 00800 // If the object is defined in the current Module, we'll be giving 00801 // it the preferred alignment. Otherwise, we have to assume that it 00802 // may only have the minimum ABI alignment. 00803 if (!GVar->isDeclaration() && !GVar->isWeakForLinker()) 00804 Align = TD->getPreferredAlignment(GVar); 00805 else 00806 Align = TD->getABITypeAlignment(ObjectType); 00807 } 00808 } 00809 } 00810 if (Align > 0) 00811 KnownZero = APInt::getLowBitsSet(BitWidth, 00812 countTrailingZeros(Align)); 00813 else 00814 KnownZero.clearAllBits(); 00815 KnownOne.clearAllBits(); 00816 return; 00817 } 00818 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 00819 // the bits of its aliasee. 00820 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 00821 if (GA->mayBeOverridden()) { 00822 KnownZero.clearAllBits(); KnownOne.clearAllBits(); 00823 } else { 00824 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1, Q); 00825 } 00826 return; 00827 } 00828 00829 if (Argument *A = dyn_cast<Argument>(V)) { 00830 unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0; 00831 00832 if (!Align && TD && A->hasStructRetAttr()) { 00833 // An sret parameter has at least the ABI alignment of the return type. 
00834 Type *EltTy = cast<PointerType>(A->getType())->getElementType(); 00835 if (EltTy->isSized()) 00836 Align = TD->getABITypeAlignment(EltTy); 00837 } 00838 00839 if (Align) 00840 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); 00841 00842 // Don't give up yet... there might be an assumption that provides more 00843 // information... 00844 computeKnownBitsFromAssume(V, KnownZero, KnownOne, TD, Depth, Q); 00845 return; 00846 } 00847 00848 // Start out not knowing anything. 00849 KnownZero.clearAllBits(); KnownOne.clearAllBits(); 00850 00851 if (Depth == MaxDepth) 00852 return; // Limit search depth. 00853 00854 // Check whether a nearby assume intrinsic can determine some known bits. 00855 computeKnownBitsFromAssume(V, KnownZero, KnownOne, TD, Depth, Q); 00856 00857 Operator *I = dyn_cast<Operator>(V); 00858 if (!I) return; 00859 00860 APInt KnownZero2(KnownZero), KnownOne2(KnownOne); 00861 switch (I->getOpcode()) { 00862 default: break; 00863 case Instruction::Load: 00864 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range)) 00865 computeKnownBitsFromRangeMetadata(*MD, KnownZero); 00866 break; 00867 case Instruction::And: { 00868 // If either the LHS or the RHS are Zero, the result is zero. 00869 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1, Q); 00870 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1, Q); 00871 00872 // Output known-1 bits are only known if set in both the LHS & RHS. 00873 KnownOne &= KnownOne2; 00874 // Output known-0 are known to be clear if zero in either the LHS | RHS. 00875 KnownZero |= KnownZero2; 00876 break; 00877 } 00878 case Instruction::Or: { 00879 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1, Q); 00880 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1, Q); 00881 00882 // Output known-0 bits are only known if clear in both the LHS & RHS. 00883 KnownZero &= KnownZero2; 00884 // Output known-1 are known to be set if set in either the LHS | RHS. 00885 KnownOne |= KnownOne2; 00886 break; 00887 } 00888 case Instruction::Xor: { 00889 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1, Q); 00890 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1, Q); 00891 00892 // Output known-0 bits are known if clear or set in both the LHS & RHS. 00893 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 00894 // Output known-1 are known to be set if set in only one of the LHS, RHS. 00895 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 00896 KnownZero = KnownZeroOut; 00897 break; 00898 } 00899 case Instruction::Mul: { 00900 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); 00901 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, 00902 KnownZero, KnownOne, KnownZero2, KnownOne2, TD, 00903 Depth, Q); 00904 break; 00905 } 00906 case Instruction::UDiv: { 00907 // For the purposes of computing leading zeros we can conservatively 00908 // treat a udiv as a logical right shift by the power of 2 known to 00909 // be less than the denominator. 
00910 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1, Q); 00911 unsigned LeadZ = KnownZero2.countLeadingOnes(); 00912 00913 KnownOne2.clearAllBits(); 00914 KnownZero2.clearAllBits(); 00915 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1, Q); 00916 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros(); 00917 if (RHSUnknownLeadingOnes != BitWidth) 00918 LeadZ = std::min(BitWidth, 00919 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1); 00920 00921 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ); 00922 break; 00923 } 00924 case Instruction::Select: 00925 computeKnownBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1, Q); 00926 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1, Q); 00927 00928 // Only known if known in both the LHS and RHS. 00929 KnownOne &= KnownOne2; 00930 KnownZero &= KnownZero2; 00931 break; 00932 case Instruction::FPTrunc: 00933 case Instruction::FPExt: 00934 case Instruction::FPToUI: 00935 case Instruction::FPToSI: 00936 case Instruction::SIToFP: 00937 case Instruction::UIToFP: 00938 break; // Can't work with floating point. 00939 case Instruction::PtrToInt: 00940 case Instruction::IntToPtr: 00941 case Instruction::AddrSpaceCast: // Pointers could be different sizes. 00942 // We can't handle these if we don't know the pointer size. 00943 if (!TD) break; 00944 // FALL THROUGH and handle them the same as zext/trunc. 00945 case Instruction::ZExt: 00946 case Instruction::Trunc: { 00947 Type *SrcTy = I->getOperand(0)->getType(); 00948 00949 unsigned SrcBitWidth; 00950 // Note that we handle pointer operands here because of inttoptr/ptrtoint 00951 // which fall through here. 00952 if(TD) { 00953 SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType()); 00954 } else { 00955 SrcBitWidth = SrcTy->getScalarSizeInBits(); 00956 if (!SrcBitWidth) break; 00957 } 00958 00959 assert(SrcBitWidth && "SrcBitWidth can't be zero"); 00960 KnownZero = KnownZero.zextOrTrunc(SrcBitWidth); 00961 KnownOne = KnownOne.zextOrTrunc(SrcBitWidth); 00962 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 00963 KnownZero = KnownZero.zextOrTrunc(BitWidth); 00964 KnownOne = KnownOne.zextOrTrunc(BitWidth); 00965 // Any top bits are known to be zero. 00966 if (BitWidth > SrcBitWidth) 00967 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); 00968 break; 00969 } 00970 case Instruction::BitCast: { 00971 Type *SrcTy = I->getOperand(0)->getType(); 00972 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 00973 // TODO: For now, not handling conversions like: 00974 // (bitcast i64 %x to <2 x i32>) 00975 !I->getType()->isVectorTy()) { 00976 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 00977 break; 00978 } 00979 break; 00980 } 00981 case Instruction::SExt: { 00982 // Compute the bits in the result that are not present in the input. 00983 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); 00984 00985 KnownZero = KnownZero.trunc(SrcBitWidth); 00986 KnownOne = KnownOne.trunc(SrcBitWidth); 00987 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 00988 KnownZero = KnownZero.zext(BitWidth); 00989 KnownOne = KnownOne.zext(BitWidth); 00990 00991 // If the sign bit of the input is known set or clear, then we know the 00992 // top bits of the result. 
00993 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero 00994 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); 00995 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set 00996 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); 00997 break; 00998 } 00999 case Instruction::Shl: 01000 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 01001 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 01002 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth); 01003 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 01004 KnownZero <<= ShiftAmt; 01005 KnownOne <<= ShiftAmt; 01006 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0 01007 break; 01008 } 01009 break; 01010 case Instruction::LShr: 01011 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 01012 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 01013 // Compute the new bits that are at the top now. 01014 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth); 01015 01016 // Unsigned shift right. 01017 computeKnownBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1, Q); 01018 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt); 01019 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt); 01020 // high bits known zero. 01021 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt); 01022 break; 01023 } 01024 break; 01025 case Instruction::AShr: 01026 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 01027 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 01028 // Compute the new bits that are at the top now. 01029 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1); 01030 01031 // Signed shift right. 01032 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 01033 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt); 01034 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt); 01035 01036 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt)); 01037 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero. 01038 KnownZero |= HighBits; 01039 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one. 01040 KnownOne |= HighBits; 01041 break; 01042 } 01043 break; 01044 case Instruction::Sub: { 01045 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); 01046 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, 01047 KnownZero, KnownOne, KnownZero2, KnownOne2, TD, 01048 Depth, Q); 01049 break; 01050 } 01051 case Instruction::Add: { 01052 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); 01053 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, 01054 KnownZero, KnownOne, KnownZero2, KnownOne2, TD, 01055 Depth, Q); 01056 break; 01057 } 01058 case Instruction::SRem: 01059 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { 01060 APInt RA = Rem->getValue().abs(); 01061 if (RA.isPowerOf2()) { 01062 APInt LowBits = RA - 1; 01063 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, 01064 Depth+1, Q); 01065 01066 // The low bits of the first operand are unchanged by the srem. 01067 KnownZero = KnownZero2 & LowBits; 01068 KnownOne = KnownOne2 & LowBits; 01069 01070 // If the first operand is non-negative or has all low bits zero, then 01071 // the upper bits are all zero. 01072 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits)) 01073 KnownZero |= ~LowBits; 01074 01075 // If the first operand is negative and not all low bits are zero, then 01076 // the upper bits are all one. 
01077 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0)) 01078 KnownOne |= ~LowBits; 01079 01080 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 01081 } 01082 } 01083 01084 // The sign bit is the LHS's sign bit, except when the result of the 01085 // remainder is zero. 01086 if (KnownZero.isNonNegative()) { 01087 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); 01088 computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD, 01089 Depth+1, Q); 01090 // If it's known zero, our sign bit is also zero. 01091 if (LHSKnownZero.isNegative()) 01092 KnownZero.setBit(BitWidth - 1); 01093 } 01094 01095 break; 01096 case Instruction::URem: { 01097 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { 01098 APInt RA = Rem->getValue(); 01099 if (RA.isPowerOf2()) { 01100 APInt LowBits = (RA - 1); 01101 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, 01102 Depth+1, Q); 01103 KnownZero |= ~LowBits; 01104 KnownOne &= LowBits; 01105 break; 01106 } 01107 } 01108 01109 // Since the result is less than or equal to either operand, any leading 01110 // zero bits in either operand must also exist in the result. 01111 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 01112 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1, Q); 01113 01114 unsigned Leaders = std::max(KnownZero.countLeadingOnes(), 01115 KnownZero2.countLeadingOnes()); 01116 KnownOne.clearAllBits(); 01117 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders); 01118 break; 01119 } 01120 01121 case Instruction::Alloca: { 01122 AllocaInst *AI = cast<AllocaInst>(V); 01123 unsigned Align = AI->getAlignment(); 01124 if (Align == 0 && TD) 01125 Align = TD->getABITypeAlignment(AI->getType()->getElementType()); 01126 01127 if (Align > 0) 01128 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); 01129 break; 01130 } 01131 case Instruction::GetElementPtr: { 01132 // Analyze all of the subscripts of this getelementptr instruction 01133 // to determine if we can prove known low zero bits. 01134 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0); 01135 computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD, 01136 Depth+1, Q); 01137 unsigned TrailZ = LocalKnownZero.countTrailingOnes(); 01138 01139 gep_type_iterator GTI = gep_type_begin(I); 01140 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { 01141 Value *Index = I->getOperand(i); 01142 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 01143 // Handle struct member offset arithmetic. 01144 if (!TD) { 01145 TrailZ = 0; 01146 break; 01147 } 01148 01149 // Handle case when index is vector zeroinitializer 01150 Constant *CIndex = cast<Constant>(Index); 01151 if (CIndex->isZeroValue()) 01152 continue; 01153 01154 if (CIndex->getType()->isVectorTy()) 01155 Index = CIndex->getSplatValue(); 01156 01157 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); 01158 const StructLayout *SL = TD->getStructLayout(STy); 01159 uint64_t Offset = SL->getElementOffset(Idx); 01160 TrailZ = std::min<unsigned>(TrailZ, 01161 countTrailingZeros(Offset)); 01162 } else { 01163 // Handle array index arithmetic. 01164 Type *IndexedTy = GTI.getIndexedType(); 01165 if (!IndexedTy->isSized()) { 01166 TrailZ = 0; 01167 break; 01168 } 01169 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits(); 01170 uint64_t TypeSize = TD ? 
TD->getTypeAllocSize(IndexedTy) : 1; 01171 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0); 01172 computeKnownBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1, Q); 01173 TrailZ = std::min(TrailZ, 01174 unsigned(countTrailingZeros(TypeSize) + 01175 LocalKnownZero.countTrailingOnes())); 01176 } 01177 } 01178 01179 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ); 01180 break; 01181 } 01182 case Instruction::PHI: { 01183 PHINode *P = cast<PHINode>(I); 01184 // Handle the case of a simple two-predecessor recurrence PHI. 01185 // There's a lot more that could theoretically be done here, but 01186 // this is sufficient to catch some interesting cases. 01187 if (P->getNumIncomingValues() == 2) { 01188 for (unsigned i = 0; i != 2; ++i) { 01189 Value *L = P->getIncomingValue(i); 01190 Value *R = P->getIncomingValue(!i); 01191 Operator *LU = dyn_cast<Operator>(L); 01192 if (!LU) 01193 continue; 01194 unsigned Opcode = LU->getOpcode(); 01195 // Check for operations that have the property that if 01196 // both their operands have low zero bits, the result 01197 // will have low zero bits. 01198 if (Opcode == Instruction::Add || 01199 Opcode == Instruction::Sub || 01200 Opcode == Instruction::And || 01201 Opcode == Instruction::Or || 01202 Opcode == Instruction::Mul) { 01203 Value *LL = LU->getOperand(0); 01204 Value *LR = LU->getOperand(1); 01205 // Find a recurrence. 01206 if (LL == I) 01207 L = LR; 01208 else if (LR == I) 01209 L = LL; 01210 else 01211 break; 01212 // Ok, we have a PHI of the form L op= R. Check for low 01213 // zero bits. 01214 computeKnownBits(R, KnownZero2, KnownOne2, TD, Depth+1, Q); 01215 01216 // We need to take the minimum number of known bits 01217 APInt KnownZero3(KnownZero), KnownOne3(KnownOne); 01218 computeKnownBits(L, KnownZero3, KnownOne3, TD, Depth+1, Q); 01219 01220 KnownZero = APInt::getLowBitsSet(BitWidth, 01221 std::min(KnownZero2.countTrailingOnes(), 01222 KnownZero3.countTrailingOnes())); 01223 break; 01224 } 01225 } 01226 } 01227 01228 // Unreachable blocks may have zero-operand PHI nodes. 01229 if (P->getNumIncomingValues() == 0) 01230 break; 01231 01232 // Otherwise take the unions of the known bit sets of the operands, 01233 // taking conservative care to avoid excessive recursion. 01234 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) { 01235 // Skip if every incoming value references to ourself. 01236 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue())) 01237 break; 01238 01239 KnownZero = APInt::getAllOnesValue(BitWidth); 01240 KnownOne = APInt::getAllOnesValue(BitWidth); 01241 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) { 01242 // Skip direct self references. 01243 if (P->getIncomingValue(i) == P) continue; 01244 01245 KnownZero2 = APInt(BitWidth, 0); 01246 KnownOne2 = APInt(BitWidth, 0); 01247 // Recurse, but cap the recursion to one level, because we don't 01248 // want to waste time spinning around in loops. 01249 computeKnownBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD, 01250 MaxDepth-1, Q); 01251 KnownZero &= KnownZero2; 01252 KnownOne &= KnownOne2; 01253 // If all bits have been ruled out, there's no need to check 01254 // more operands. 
01255 if (!KnownZero && !KnownOne) 01256 break; 01257 } 01258 } 01259 break; 01260 } 01261 case Instruction::Call: 01262 case Instruction::Invoke: 01263 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range)) 01264 computeKnownBitsFromRangeMetadata(*MD, KnownZero); 01265 // If a range metadata is attached to this IntrinsicInst, intersect the 01266 // explicit range specified by the metadata and the implicit range of 01267 // the intrinsic. 01268 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 01269 switch (II->getIntrinsicID()) { 01270 default: break; 01271 case Intrinsic::ctlz: 01272 case Intrinsic::cttz: { 01273 unsigned LowBits = Log2_32(BitWidth)+1; 01274 // If this call is undefined for 0, the result will be less than 2^n. 01275 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 01276 LowBits -= 1; 01277 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 01278 break; 01279 } 01280 case Intrinsic::ctpop: { 01281 unsigned LowBits = Log2_32(BitWidth)+1; 01282 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 01283 break; 01284 } 01285 case Intrinsic::x86_sse42_crc32_64_64: 01286 KnownZero |= APInt::getHighBitsSet(64, 32); 01287 break; 01288 } 01289 } 01290 break; 01291 case Instruction::ExtractValue: 01292 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { 01293 ExtractValueInst *EVI = cast<ExtractValueInst>(I); 01294 if (EVI->getNumIndices() != 1) break; 01295 if (EVI->getIndices()[0] == 0) { 01296 switch (II->getIntrinsicID()) { 01297 default: break; 01298 case Intrinsic::uadd_with_overflow: 01299 case Intrinsic::sadd_with_overflow: 01300 computeKnownBitsAddSub(true, II->getArgOperand(0), 01301 II->getArgOperand(1), false, KnownZero, 01302 KnownOne, KnownZero2, KnownOne2, TD, Depth, Q); 01303 break; 01304 case Intrinsic::usub_with_overflow: 01305 case Intrinsic::ssub_with_overflow: 01306 computeKnownBitsAddSub(false, II->getArgOperand(0), 01307 II->getArgOperand(1), false, KnownZero, 01308 KnownOne, KnownZero2, KnownOne2, TD, Depth, Q); 01309 break; 01310 case Intrinsic::umul_with_overflow: 01311 case Intrinsic::smul_with_overflow: 01312 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), 01313 false, KnownZero, KnownOne, 01314 KnownZero2, KnownOne2, TD, Depth, Q); 01315 break; 01316 } 01317 } 01318 } 01319 } 01320 01321 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 01322 } 01323 01324 /// ComputeSignBit - Determine whether the sign bit is known to be zero or 01325 /// one. Convenience wrapper around computeKnownBits. 01326 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, 01327 const DataLayout *TD, unsigned Depth, 01328 const Query &Q) { 01329 unsigned BitWidth = getBitWidth(V->getType(), TD); 01330 if (!BitWidth) { 01331 KnownZero = false; 01332 KnownOne = false; 01333 return; 01334 } 01335 APInt ZeroBits(BitWidth, 0); 01336 APInt OneBits(BitWidth, 0); 01337 computeKnownBits(V, ZeroBits, OneBits, TD, Depth, Q); 01338 KnownOne = OneBits[BitWidth - 1]; 01339 KnownZero = ZeroBits[BitWidth - 1]; 01340 } 01341 01342 /// isKnownToBeAPowerOfTwo - Return true if the given value is known to have exactly one 01343 /// bit set when defined. For vectors return true if every element is known to 01344 /// be a power of two when defined. Supports values with integer or pointer 01345 /// types and vectors of integers. 
01346 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth, 01347 const Query &Q) { 01348 if (Constant *C = dyn_cast<Constant>(V)) { 01349 if (C->isNullValue()) 01350 return OrZero; 01351 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) 01352 return CI->getValue().isPowerOf2(); 01353 // TODO: Handle vector constants. 01354 } 01355 01356 // 1 << X is clearly a power of two if the one is not shifted off the end. If 01357 // it is shifted off the end then the result is undefined. 01358 if (match(V, m_Shl(m_One(), m_Value()))) 01359 return true; 01360 01361 // (signbit) >>l X is clearly a power of two if the one is not shifted off the 01362 // bottom. If it is shifted off the bottom then the result is undefined. 01363 if (match(V, m_LShr(m_SignBit(), m_Value()))) 01364 return true; 01365 01366 // The remaining tests are all recursive, so bail out if we hit the limit. 01367 if (Depth++ == MaxDepth) 01368 return false; 01369 01370 Value *X = nullptr, *Y = nullptr; 01371 // A shift of a power of two is a power of two or zero. 01372 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 01373 match(V, m_Shr(m_Value(X), m_Value())))) 01374 return isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth, Q); 01375 01376 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 01377 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 01378 01379 if (SelectInst *SI = dyn_cast<SelectInst>(V)) 01380 return 01381 isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 01382 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 01383 01384 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 01385 // A power of two and'd with anything is a power of two or zero. 01386 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth, Q) || 01387 isKnownToBeAPowerOfTwo(Y, /*OrZero*/true, Depth, Q)) 01388 return true; 01389 // X & (-X) is always a power of two or zero. 01390 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 01391 return true; 01392 return false; 01393 } 01394 01395 // Adding a power-of-two or zero to the same power-of-two or zero yields 01396 // either the original power-of-two, a larger power-of-two or zero. 01397 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 01398 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 01399 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 01400 if (match(X, m_And(m_Specific(Y), m_Value())) || 01401 match(X, m_And(m_Value(), m_Specific(Y)))) 01402 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 01403 return true; 01404 if (match(Y, m_And(m_Specific(X), m_Value())) || 01405 match(Y, m_And(m_Value(), m_Specific(X)))) 01406 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 01407 return true; 01408 01409 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 01410 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0); 01411 computeKnownBits(X, LHSZeroBits, LHSOneBits, nullptr, Depth, Q); 01412 01413 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0); 01414 computeKnownBits(Y, RHSZeroBits, RHSOneBits, nullptr, Depth, Q); 01415 // If i8 V is a power of two or zero: 01416 // ZeroBits: 1 1 1 0 1 1 1 1 01417 // ~ZeroBits: 0 0 0 1 0 0 0 0 01418 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2()) 01419 // If OrZero isn't set, we cannot give back a zero result. 01420 // Make sure either the LHS or RHS has a bit set. 
01421 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue()) 01422 return true; 01423 } 01424 } 01425 01426 // An exact divide or right shift can only shift off zero bits, so the result 01427 // is a power of two only if the first operand is a power of two and not 01428 // copying a sign bit (sdiv int_min, 2). 01429 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 01430 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 01431 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 01432 Depth, Q); 01433 } 01434 01435 return false; 01436 } 01437 01438 /// \brief Test whether a GEP's result is known to be non-null. 01439 /// 01440 /// Uses properties inherent in a GEP to try to determine whether it is known 01441 /// to be non-null. 01442 /// 01443 /// Currently this routine does not support vector GEPs. 01444 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout *DL, 01445 unsigned Depth, const Query &Q) { 01446 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 01447 return false; 01448 01449 // FIXME: Support vector-GEPs. 01450 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 01451 01452 // If the base pointer is non-null, we cannot walk to a null address with an 01453 // inbounds GEP in address space zero. 01454 if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q)) 01455 return true; 01456 01457 // Past this, if we don't have DataLayout, we can't do much. 01458 if (!DL) 01459 return false; 01460 01461 // Walk the GEP operands and see if any operand introduces a non-zero offset. 01462 // If so, then the GEP cannot produce a null pointer, as doing so would 01463 // inherently violate the inbounds contract within address space zero. 01464 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 01465 GTI != GTE; ++GTI) { 01466 // Struct types are easy -- they must always be indexed by a constant. 01467 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 01468 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 01469 unsigned ElementIdx = OpC->getZExtValue(); 01470 const StructLayout *SL = DL->getStructLayout(STy); 01471 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 01472 if (ElementOffset > 0) 01473 return true; 01474 continue; 01475 } 01476 01477 // If we have a zero-sized type, the index doesn't matter. Keep looping. 01478 if (DL->getTypeAllocSize(GTI.getIndexedType()) == 0) 01479 continue; 01480 01481 // Fast path the constant operand case both for efficiency and so we don't 01482 // increment Depth when just zipping down an all-constant GEP. 01483 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 01484 if (!OpC->isZero()) 01485 return true; 01486 continue; 01487 } 01488 01489 // We post-increment Depth here because while isKnownNonZero increments it 01490 // as well, when we pop back up that increment won't persist. We don't want 01491 // to recurse 10k times just because we have 10k GEP operands. We don't 01492 // bail completely out because we want to handle constant GEPs regardless 01493 // of depth. 01494 if (Depth++ >= MaxDepth) 01495 continue; 01496 01497 if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q)) 01498 return true; 01499 } 01500 01501 return false; 01502 } 01503 01504 /// isKnownNonZero - Return true if the given value is known to be non-zero 01505 /// when defined. For vectors return true if every element is known to be 01506 /// non-zero when defined. 
Supports values with integer or pointer type and 01507 /// vectors of integers. 01508 bool isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth, 01509 const Query &Q) { 01510 if (Constant *C = dyn_cast<Constant>(V)) { 01511 if (C->isNullValue()) 01512 return false; 01513 if (isa<ConstantInt>(C)) 01514 // Must be non-zero due to null test above. 01515 return true; 01516 // TODO: Handle vectors 01517 return false; 01518 } 01519 01520 // The remaining tests are all recursive, so bail out if we hit the limit. 01521 if (Depth++ >= MaxDepth) 01522 return false; 01523 01524 // Check for pointer simplifications. 01525 if (V->getType()->isPointerTy()) { 01526 if (isKnownNonNull(V)) 01527 return true; 01528 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 01529 if (isGEPKnownNonNull(GEP, TD, Depth, Q)) 01530 return true; 01531 } 01532 01533 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), TD); 01534 01535 // X | Y != 0 if X != 0 or Y != 0. 01536 Value *X = nullptr, *Y = nullptr; 01537 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 01538 return isKnownNonZero(X, TD, Depth, Q) || 01539 isKnownNonZero(Y, TD, Depth, Q); 01540 01541 // ext X != 0 if X != 0. 01542 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 01543 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth, Q); 01544 01545 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 01546 // if the lowest bit is shifted off the end. 01547 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) { 01548 // shl nuw can't remove any non-zero bits. 01549 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 01550 if (BO->hasNoUnsignedWrap()) 01551 return isKnownNonZero(X, TD, Depth, Q); 01552 01553 APInt KnownZero(BitWidth, 0); 01554 APInt KnownOne(BitWidth, 0); 01555 computeKnownBits(X, KnownZero, KnownOne, TD, Depth, Q); 01556 if (KnownOne[0]) 01557 return true; 01558 } 01559 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 01560 // defined if the sign bit is shifted off the end. 01561 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 01562 // shr exact can only shift out zero bits. 01563 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 01564 if (BO->isExact()) 01565 return isKnownNonZero(X, TD, Depth, Q); 01566 01567 bool XKnownNonNegative, XKnownNegative; 01568 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth, Q); 01569 if (XKnownNegative) 01570 return true; 01571 } 01572 // div exact can only produce a zero if the dividend is zero. 01573 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 01574 return isKnownNonZero(X, TD, Depth, Q); 01575 } 01576 // X + Y. 01577 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 01578 bool XKnownNonNegative, XKnownNegative; 01579 bool YKnownNonNegative, YKnownNegative; 01580 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth, Q); 01581 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth, Q); 01582 01583 // If X and Y are both non-negative (as signed values) then their sum is not 01584 // zero unless both X and Y are zero. 01585 if (XKnownNonNegative && YKnownNonNegative) 01586 if (isKnownNonZero(X, TD, Depth, Q) || 01587 isKnownNonZero(Y, TD, Depth, Q)) 01588 return true; 01589 01590 // If X and Y are both negative (as signed values) then their sum is not 01591 // zero unless both X and Y equal INT_MIN. 
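    // (Illustrative note, not in the original source: for i8, INT_MIN is
    // -128 and (-128) + (-128) wraps to 0, while any other pair of negative
    // i8 values sums to a non-zero result; the checks below therefore only
    // need to prove that one operand has a bit set besides the sign bit.)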
01592 if (BitWidth && XKnownNegative && YKnownNegative) { 01593 APInt KnownZero(BitWidth, 0); 01594 APInt KnownOne(BitWidth, 0); 01595 APInt Mask = APInt::getSignedMaxValue(BitWidth); 01596 // The sign bit of X is set. If some other bit is set then X is not equal 01597 // to INT_MIN. 01598 computeKnownBits(X, KnownZero, KnownOne, TD, Depth, Q); 01599 if ((KnownOne & Mask) != 0) 01600 return true; 01601 // The sign bit of Y is set. If some other bit is set then Y is not equal 01602 // to INT_MIN. 01603 computeKnownBits(Y, KnownZero, KnownOne, TD, Depth, Q); 01604 if ((KnownOne & Mask) != 0) 01605 return true; 01606 } 01607 01608 // The sum of a non-negative number and a power of two is not zero. 01609 if (XKnownNonNegative && 01610 isKnownToBeAPowerOfTwo(Y, /*OrZero*/false, Depth, Q)) 01611 return true; 01612 if (YKnownNonNegative && 01613 isKnownToBeAPowerOfTwo(X, /*OrZero*/false, Depth, Q)) 01614 return true; 01615 } 01616 // X * Y. 01617 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 01618 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 01619 // If X and Y are non-zero then so is X * Y as long as the multiplication 01620 // does not overflow. 01621 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 01622 isKnownNonZero(X, TD, Depth, Q) && 01623 isKnownNonZero(Y, TD, Depth, Q)) 01624 return true; 01625 } 01626 // (C ? X : Y) != 0 if X != 0 and Y != 0. 01627 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 01628 if (isKnownNonZero(SI->getTrueValue(), TD, Depth, Q) && 01629 isKnownNonZero(SI->getFalseValue(), TD, Depth, Q)) 01630 return true; 01631 } 01632 01633 if (!BitWidth) return false; 01634 APInt KnownZero(BitWidth, 0); 01635 APInt KnownOne(BitWidth, 0); 01636 computeKnownBits(V, KnownZero, KnownOne, TD, Depth, Q); 01637 return KnownOne != 0; 01638 } 01639 01640 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 01641 /// this predicate to simplify operations downstream. Mask is known to be zero 01642 /// for bits that V cannot have. 01643 /// 01644 /// This function is defined on values with integer type, values with pointer 01645 /// type (but only if TD is non-null), and vectors of integers. In the case 01646 /// where V is a vector, the mask, known zero, and known one values are the 01647 /// same width as the vector element, and the bit is set only if it is true 01648 /// for all of the elements in the vector. 01649 bool MaskedValueIsZero(Value *V, const APInt &Mask, 01650 const DataLayout *TD, unsigned Depth, 01651 const Query &Q) { 01652 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); 01653 computeKnownBits(V, KnownZero, KnownOne, TD, Depth, Q); 01654 return (KnownZero & Mask) == Mask; 01655 } 01656 01657 01658 01659 /// ComputeNumSignBits - Return the number of times the sign bit of the 01660 /// register is replicated into the other bits. We know that at least 1 bit 01661 /// is always equal to the sign bit (itself), but other cases can give us 01662 /// information. For example, immediately after an "ashr X, 2", we know that 01663 /// the top 3 bits are all equal to each other, so we return 3. 01664 /// 01665 /// 'Op' must have a scalar integer type. 01666 /// 01667 unsigned ComputeNumSignBits(Value *V, const DataLayout *TD, 01668 unsigned Depth, const Query &Q) { 01669 assert((TD || V->getType()->isIntOrIntVectorTy()) && 01670 "ComputeNumSignBits requires a DataLayout object to operate " 01671 "on non-integer values!"); 01672 Type *Ty = V->getType(); 01673 unsigned TyBits = TD ? 
TD->getTypeSizeInBits(V->getType()->getScalarType()) : 01674 Ty->getScalarSizeInBits(); 01675 unsigned Tmp, Tmp2; 01676 unsigned FirstAnswer = 1; 01677 01678 // Note that ConstantInt is handled by the general computeKnownBits case 01679 // below. 01680 01681 if (Depth == 6) 01682 return 1; // Limit search depth. 01683 01684 Operator *U = dyn_cast<Operator>(V); 01685 switch (Operator::getOpcode(V)) { 01686 default: break; 01687 case Instruction::SExt: 01688 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); 01689 return ComputeNumSignBits(U->getOperand(0), TD, Depth+1, Q) + Tmp; 01690 01691 case Instruction::AShr: { 01692 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1, Q); 01693 // ashr X, C -> adds C sign bits. Vectors too. 01694 const APInt *ShAmt; 01695 if (match(U->getOperand(1), m_APInt(ShAmt))) { 01696 Tmp += ShAmt->getZExtValue(); 01697 if (Tmp > TyBits) Tmp = TyBits; 01698 } 01699 return Tmp; 01700 } 01701 case Instruction::Shl: { 01702 const APInt *ShAmt; 01703 if (match(U->getOperand(1), m_APInt(ShAmt))) { 01704 // shl destroys sign bits. 01705 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1, Q); 01706 Tmp2 = ShAmt->getZExtValue(); 01707 if (Tmp2 >= TyBits || // Bad shift. 01708 Tmp2 >= Tmp) break; // Shifted all sign bits out. 01709 return Tmp - Tmp2; 01710 } 01711 break; 01712 } 01713 case Instruction::And: 01714 case Instruction::Or: 01715 case Instruction::Xor: // NOT is handled here. 01716 // Logical binary ops preserve the number of sign bits at the worst. 01717 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1, Q); 01718 if (Tmp != 1) { 01719 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1, Q); 01720 FirstAnswer = std::min(Tmp, Tmp2); 01721 // We computed what we know about the sign bits as our first 01722 // answer. Now proceed to the generic code that uses 01723 // computeKnownBits, and pick whichever answer is better. 01724 } 01725 break; 01726 01727 case Instruction::Select: 01728 Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1, Q); 01729 if (Tmp == 1) return 1; // Early out. 01730 Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1, Q); 01731 return std::min(Tmp, Tmp2); 01732 01733 case Instruction::Add: 01734 // Add can have at most one carry bit. Thus we know that the output 01735 // is, at worst, one more bit than the inputs. 01736 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1, Q); 01737 if (Tmp == 1) return 1; // Early out. 01738 01739 // Special case decrementing a value (ADD X, -1): 01740 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1))) 01741 if (CRHS->isAllOnesValue()) { 01742 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 01743 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1, Q); 01744 01745 // If the input is known to be 0 or 1, the output is 0/-1, which is all 01746 // sign bits set. 01747 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 01748 return TyBits; 01749 01750 // If we are subtracting one from a positive number, there is no carry 01751 // out of the result. 01752 if (KnownZero.isNegative()) 01753 return Tmp; 01754 } 01755 01756 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1, Q); 01757 if (Tmp2 == 1) return 1; 01758 return std::min(Tmp, Tmp2)-1; 01759 01760 case Instruction::Sub: 01761 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1, Q); 01762 if (Tmp2 == 1) return 1; 01763 01764 // Handle NEG. 
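    // (Illustrative note, not in the original source: 'sub 0, %x' is a
    // negation; e.g. if %x was produced by 'and i8 %y, 1' it is known to be
    // 0 or 1, so 0 - %x is 0 or -1 and all 8 bits are sign bits.)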
01765 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0))) 01766 if (CLHS->isNullValue()) { 01767 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 01768 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1, Q); 01769 // If the input is known to be 0 or 1, the output is 0/-1, which is all 01770 // sign bits set. 01771 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 01772 return TyBits; 01773 01774 // If the input is known to be positive (the sign bit is known clear), 01775 // the output of the NEG has the same number of sign bits as the input. 01776 if (KnownZero.isNegative()) 01777 return Tmp2; 01778 01779 // Otherwise, we treat this like a SUB. 01780 } 01781 01782 // Sub can have at most one carry bit. Thus we know that the output 01783 // is, at worst, one more bit than the inputs. 01784 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1, Q); 01785 if (Tmp == 1) return 1; // Early out. 01786 return std::min(Tmp, Tmp2)-1; 01787 01788 case Instruction::PHI: { 01789 PHINode *PN = cast<PHINode>(U); 01790 // Don't analyze large in-degree PHIs. 01791 if (PN->getNumIncomingValues() > 4) break; 01792 01793 // Take the minimum of all incoming values. This can't infinitely loop 01794 // because of our depth threshold. 01795 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1, Q); 01796 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) { 01797 if (Tmp == 1) return Tmp; 01798 Tmp = std::min(Tmp, 01799 ComputeNumSignBits(PN->getIncomingValue(i), TD, 01800 Depth+1, Q)); 01801 } 01802 return Tmp; 01803 } 01804 01805 case Instruction::Trunc: 01806 // FIXME: it's tricky to do anything useful for this, but it is an important 01807 // case for targets like X86. 01808 break; 01809 } 01810 01811 // Finally, if we can prove that the top bits of the result are 0's or 1's, 01812 // use this information. 01813 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 01814 APInt Mask; 01815 computeKnownBits(V, KnownZero, KnownOne, TD, Depth, Q); 01816 01817 if (KnownZero.isNegative()) { // sign bit is 0 01818 Mask = KnownZero; 01819 } else if (KnownOne.isNegative()) { // sign bit is 1; 01820 Mask = KnownOne; 01821 } else { 01822 // Nothing known. 01823 return FirstAnswer; 01824 } 01825 01826 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 01827 // the number of identical bits in the top of the input value. 01828 Mask = ~Mask; 01829 Mask <<= Mask.getBitWidth()-TyBits; 01830 // Return # leading zeros. We use 'min' here in case Val was zero before 01831 // shifting. We don't want to return '64' as for an i32 "0". 01832 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros())); 01833 } 01834 01835 /// ComputeMultiple - This function computes the integer multiple of Base that 01836 /// equals V. If successful, it returns true and returns the multiple in 01837 /// Multiple. If unsuccessful, it returns false. It looks 01838 /// through SExt instructions only if LookThroughSExt is true. 
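/// For example (illustrative, not part of the original comment): with
/// Base == 4 and V == i32 12 this returns true and sets Multiple to i32 3;
/// with Base == 0 it always returns false.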
01839 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 01840 bool LookThroughSExt, unsigned Depth) { 01841 const unsigned MaxDepth = 6; 01842 01843 assert(V && "No Value?"); 01844 assert(Depth <= MaxDepth && "Limit Search Depth"); 01845 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 01846 01847 Type *T = V->getType(); 01848 01849 ConstantInt *CI = dyn_cast<ConstantInt>(V); 01850 01851 if (Base == 0) 01852 return false; 01853 01854 if (Base == 1) { 01855 Multiple = V; 01856 return true; 01857 } 01858 01859 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 01860 Constant *BaseVal = ConstantInt::get(T, Base); 01861 if (CO && CO == BaseVal) { 01862 // Multiple is 1. 01863 Multiple = ConstantInt::get(T, 1); 01864 return true; 01865 } 01866 01867 if (CI && CI->getZExtValue() % Base == 0) { 01868 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 01869 return true; 01870 } 01871 01872 if (Depth == MaxDepth) return false; // Limit search depth. 01873 01874 Operator *I = dyn_cast<Operator>(V); 01875 if (!I) return false; 01876 01877 switch (I->getOpcode()) { 01878 default: break; 01879 case Instruction::SExt: 01880 if (!LookThroughSExt) return false; 01881 // otherwise fall through to ZExt 01882 case Instruction::ZExt: 01883 return ComputeMultiple(I->getOperand(0), Base, Multiple, 01884 LookThroughSExt, Depth+1); 01885 case Instruction::Shl: 01886 case Instruction::Mul: { 01887 Value *Op0 = I->getOperand(0); 01888 Value *Op1 = I->getOperand(1); 01889 01890 if (I->getOpcode() == Instruction::Shl) { 01891 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 01892 if (!Op1CI) return false; 01893 // Turn Op0 << Op1 into Op0 * 2^Op1 01894 APInt Op1Int = Op1CI->getValue(); 01895 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 01896 APInt API(Op1Int.getBitWidth(), 0); 01897 API.setBit(BitToSet); 01898 Op1 = ConstantInt::get(V->getContext(), API); 01899 } 01900 01901 Value *Mul0 = nullptr; 01902 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 01903 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 01904 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 01905 if (Op1C->getType()->getPrimitiveSizeInBits() < 01906 MulC->getType()->getPrimitiveSizeInBits()) 01907 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 01908 if (Op1C->getType()->getPrimitiveSizeInBits() > 01909 MulC->getType()->getPrimitiveSizeInBits()) 01910 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 01911 01912 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 01913 Multiple = ConstantExpr::getMul(MulC, Op1C); 01914 return true; 01915 } 01916 01917 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 01918 if (Mul0CI->getValue() == 1) { 01919 // V == Base * Op1, so return Op1 01920 Multiple = Op1; 01921 return true; 01922 } 01923 } 01924 01925 Value *Mul1 = nullptr; 01926 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 01927 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 01928 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 01929 if (Op0C->getType()->getPrimitiveSizeInBits() < 01930 MulC->getType()->getPrimitiveSizeInBits()) 01931 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 01932 if (Op0C->getType()->getPrimitiveSizeInBits() > 01933 MulC->getType()->getPrimitiveSizeInBits()) 01934 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 01935 01936 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 01937 Multiple = ConstantExpr::getMul(MulC, Op0C); 01938 return true; 01939 } 01940 01941 if (ConstantInt *Mul1CI = 
dyn_cast<ConstantInt>(Mul1)) 01942 if (Mul1CI->getValue() == 1) { 01943 // V == Base * Op0, so return Op0 01944 Multiple = Op0; 01945 return true; 01946 } 01947 } 01948 } 01949 } 01950 01951 // We could not determine if V is a multiple of Base. 01952 return false; 01953 } 01954 01955 /// CannotBeNegativeZero - Return true if we can prove that the specified FP 01956 /// value is never equal to -0.0. 01957 /// 01958 /// NOTE: this function will need to be revisited when we support non-default 01959 /// rounding modes! 01960 /// 01961 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) { 01962 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) 01963 return !CFP->getValueAPF().isNegZero(); 01964 01965 if (Depth == 6) 01966 return 1; // Limit search depth. 01967 01968 const Operator *I = dyn_cast<Operator>(V); 01969 if (!I) return false; 01970 01971 // Check if the nsz fast-math flag is set 01972 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I)) 01973 if (FPO->hasNoSignedZeros()) 01974 return true; 01975 01976 // (add x, 0.0) is guaranteed to return +0.0, not -0.0. 01977 if (I->getOpcode() == Instruction::FAdd) 01978 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1))) 01979 if (CFP->isNullValue()) 01980 return true; 01981 01982 // sitofp and uitofp turn into +0.0 for zero. 01983 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) 01984 return true; 01985 01986 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 01987 // sqrt(-0.0) = -0.0, no other negative results are possible. 01988 if (II->getIntrinsicID() == Intrinsic::sqrt) 01989 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1); 01990 01991 if (const CallInst *CI = dyn_cast<CallInst>(I)) 01992 if (const Function *F = CI->getCalledFunction()) { 01993 if (F->isDeclaration()) { 01994 // abs(x) != -0.0 01995 if (F->getName() == "abs") return true; 01996 // fabs[lf](x) != -0.0 01997 if (F->getName() == "fabs") return true; 01998 if (F->getName() == "fabsf") return true; 01999 if (F->getName() == "fabsl") return true; 02000 if (F->getName() == "sqrt" || F->getName() == "sqrtf" || 02001 F->getName() == "sqrtl") 02002 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1); 02003 } 02004 } 02005 02006 return false; 02007 } 02008 02009 /// isBytewiseValue - If the specified value can be set by repeating the same 02010 /// byte in memory, return the i8 value that it is represented with. This is 02011 /// true for all i8 values obviously, but is also true for i32 0, i32 -1, 02012 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated 02013 /// byte store (e.g. i16 0x1234), return null. 02014 Value *llvm::isBytewiseValue(Value *V) { 02015 // All byte-wide stores are splatable, even of arbitrary variables. 02016 if (V->getType()->isIntegerTy(8)) return V; 02017 02018 // Handle 'null' ConstantArrayZero etc. 02019 if (Constant *C = dyn_cast<Constant>(V)) 02020 if (C->isNullValue()) 02021 return Constant::getNullValue(Type::getInt8Ty(V->getContext())); 02022 02023 // Constant float and double values can be handled as integer values if the 02024 // corresponding integer value is "byteable". An important case is 0.0. 02025 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 02026 if (CFP->getType()->isFloatTy()) 02027 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())); 02028 if (CFP->getType()->isDoubleTy()) 02029 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())); 02030 // Don't handle long double formats, which have strange constraints. 
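    // (Illustrative note, not in the original source: the bitcast folds the
    // FP constant to a ConstantInt, so a float whose IEEE-754 bit pattern is
    // 0x3E3E3E3E is recognized by the integer logic below as the repeated
    // byte 0x3E.)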
02031 } 02032 02033 // We can handle constant integers that are power of two in size and a 02034 // multiple of 8 bits. 02035 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 02036 unsigned Width = CI->getBitWidth(); 02037 if (isPowerOf2_32(Width) && Width > 8) { 02038 // We can handle this value if the recursive binary decomposition is the 02039 // same at all levels. 02040 APInt Val = CI->getValue(); 02041 APInt Val2; 02042 while (Val.getBitWidth() != 8) { 02043 unsigned NextWidth = Val.getBitWidth()/2; 02044 Val2 = Val.lshr(NextWidth); 02045 Val2 = Val2.trunc(Val.getBitWidth()/2); 02046 Val = Val.trunc(Val.getBitWidth()/2); 02047 02048 // If the top/bottom halves aren't the same, reject it. 02049 if (Val != Val2) 02050 return nullptr; 02051 } 02052 return ConstantInt::get(V->getContext(), Val); 02053 } 02054 } 02055 02056 // A ConstantDataArray/Vector is splatable if all its members are equal and 02057 // also splatable. 02058 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { 02059 Value *Elt = CA->getElementAsConstant(0); 02060 Value *Val = isBytewiseValue(Elt); 02061 if (!Val) 02062 return nullptr; 02063 02064 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) 02065 if (CA->getElementAsConstant(I) != Elt) 02066 return nullptr; 02067 02068 return Val; 02069 } 02070 02071 // Conceptually, we could handle things like: 02072 // %a = zext i8 %X to i16 02073 // %b = shl i16 %a, 8 02074 // %c = or i16 %a, %b 02075 // but until there is an example that actually needs this, it doesn't seem 02076 // worth worrying about. 02077 return nullptr; 02078 } 02079 02080 02081 // This is the recursive version of BuildSubAggregate. It takes a few different 02082 // arguments. Idxs is the index within the nested struct From that we are 02083 // looking at now (which is of type IndexedType). IdxSkip is the number of 02084 // indices from Idxs that should be left out when inserting into the resulting 02085 // struct. To is the result struct built so far, new insertvalue instructions 02086 // build on that. 02087 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 02088 SmallVectorImpl<unsigned> &Idxs, 02089 unsigned IdxSkip, 02090 Instruction *InsertBefore) { 02091 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType); 02092 if (STy) { 02093 // Save the original To argument so we can modify it 02094 Value *OrigTo = To; 02095 // General case, the type indexed by Idxs is a struct 02096 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 02097 // Process each struct element recursively 02098 Idxs.push_back(i); 02099 Value *PrevTo = To; 02100 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 02101 InsertBefore); 02102 Idxs.pop_back(); 02103 if (!To) { 02104 // Couldn't find any inserted value for this index? Cleanup 02105 while (PrevTo != OrigTo) { 02106 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 02107 PrevTo = Del->getAggregateOperand(); 02108 Del->eraseFromParent(); 02109 } 02110 // Stop processing elements 02111 break; 02112 } 02113 } 02114 // If we successfully found a value for each of our subaggregates 02115 if (To) 02116 return To; 02117 } 02118 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 02119 // the struct's elements had a value that was inserted directly. In the latter 02120 // case, perhaps we can't determine each of the subelements individually, but 02121 // we might be able to find the complete struct somewhere. 
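  // (Illustrative note, not in the original source: if From has type
  // { i32, { i32, i32 } } and the inner struct was inserted by a single
  // insertvalue of the whole { i32, i32 }, the per-element walk above fails,
  // but the FindInsertedValue call below can still recover it.)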
02122 02123 // Find the value that is at that particular spot 02124 Value *V = FindInsertedValue(From, Idxs); 02125 02126 if (!V) 02127 return nullptr; 02128 02129 // Insert the value in the new (sub) aggregrate 02130 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 02131 "tmp", InsertBefore); 02132 } 02133 02134 // This helper takes a nested struct and extracts a part of it (which is again a 02135 // struct) into a new value. For example, given the struct: 02136 // { a, { b, { c, d }, e } } 02137 // and the indices "1, 1" this returns 02138 // { c, d }. 02139 // 02140 // It does this by inserting an insertvalue for each element in the resulting 02141 // struct, as opposed to just inserting a single struct. This will only work if 02142 // each of the elements of the substruct are known (ie, inserted into From by an 02143 // insertvalue instruction somewhere). 02144 // 02145 // All inserted insertvalue instructions are inserted before InsertBefore 02146 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, 02147 Instruction *InsertBefore) { 02148 assert(InsertBefore && "Must have someplace to insert!"); 02149 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), 02150 idx_range); 02151 Value *To = UndefValue::get(IndexedType); 02152 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); 02153 unsigned IdxSkip = Idxs.size(); 02154 02155 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); 02156 } 02157 02158 /// FindInsertedValue - Given an aggregrate and an sequence of indices, see if 02159 /// the scalar value indexed is already around as a register, for example if it 02160 /// were inserted directly into the aggregrate. 02161 /// 02162 /// If InsertBefore is not null, this function will duplicate (modified) 02163 /// insertvalues when a part of a nested struct is extracted. 02164 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, 02165 Instruction *InsertBefore) { 02166 // Nothing to index? Just return V then (this is useful at the end of our 02167 // recursion). 02168 if (idx_range.empty()) 02169 return V; 02170 // We have indices, so V should have an indexable type. 02171 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && 02172 "Not looking at a struct or array?"); 02173 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && 02174 "Invalid indices for type?"); 02175 02176 if (Constant *C = dyn_cast<Constant>(V)) { 02177 C = C->getAggregateElement(idx_range[0]); 02178 if (!C) return nullptr; 02179 return FindInsertedValue(C, idx_range.slice(1), InsertBefore); 02180 } 02181 02182 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { 02183 // Loop the indices for the insertvalue instruction in parallel with the 02184 // requested indices 02185 const unsigned *req_idx = idx_range.begin(); 02186 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); 02187 i != e; ++i, ++req_idx) { 02188 if (req_idx == idx_range.end()) { 02189 // We can't handle this without inserting insertvalues 02190 if (!InsertBefore) 02191 return nullptr; 02192 02193 // The requested index identifies a part of a nested aggregate. Handle 02194 // this specially. 
For example, 02195 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 02196 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 02197 // %C = extractvalue {i32, { i32, i32 } } %B, 1 02198 // This can be changed into 02199 // %A = insertvalue {i32, i32 } undef, i32 10, 0 02200 // %C = insertvalue {i32, i32 } %A, i32 11, 1 02201 // which allows the unused 0,0 element from the nested struct to be 02202 // removed. 02203 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), 02204 InsertBefore); 02205 } 02206 02207 // This insert value inserts something else than what we are looking for. 02208 // See if the (aggregrate) value inserted into has the value we are 02209 // looking for, then. 02210 if (*req_idx != *i) 02211 return FindInsertedValue(I->getAggregateOperand(), idx_range, 02212 InsertBefore); 02213 } 02214 // If we end up here, the indices of the insertvalue match with those 02215 // requested (though possibly only partially). Now we recursively look at 02216 // the inserted value, passing any remaining indices. 02217 return FindInsertedValue(I->getInsertedValueOperand(), 02218 makeArrayRef(req_idx, idx_range.end()), 02219 InsertBefore); 02220 } 02221 02222 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { 02223 // If we're extracting a value from an aggregrate that was extracted from 02224 // something else, we can extract from that something else directly instead. 02225 // However, we will need to chain I's indices with the requested indices. 02226 02227 // Calculate the number of indices required 02228 unsigned size = I->getNumIndices() + idx_range.size(); 02229 // Allocate some space to put the new indices in 02230 SmallVector<unsigned, 5> Idxs; 02231 Idxs.reserve(size); 02232 // Add indices from the extract value instruction 02233 Idxs.append(I->idx_begin(), I->idx_end()); 02234 02235 // Add requested indices 02236 Idxs.append(idx_range.begin(), idx_range.end()); 02237 02238 assert(Idxs.size() == size 02239 && "Number of indices added not correct?"); 02240 02241 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); 02242 } 02243 // Otherwise, we don't know (such as, extracting from a function return value 02244 // or load instruction) 02245 return nullptr; 02246 } 02247 02248 /// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if 02249 /// it can be expressed as a base pointer plus a constant offset. Return the 02250 /// base and offset to the caller. 02251 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, 02252 const DataLayout *DL) { 02253 // Without DataLayout, conservatively assume 64-bit offsets, which is 02254 // the widest we support. 02255 unsigned BitWidth = DL ? 
DL->getPointerTypeSizeInBits(Ptr->getType()) : 64; 02256 APInt ByteOffset(BitWidth, 0); 02257 while (1) { 02258 if (Ptr->getType()->isVectorTy()) 02259 break; 02260 02261 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { 02262 if (DL) { 02263 APInt GEPOffset(BitWidth, 0); 02264 if (!GEP->accumulateConstantOffset(*DL, GEPOffset)) 02265 break; 02266 02267 ByteOffset += GEPOffset; 02268 } 02269 02270 Ptr = GEP->getPointerOperand(); 02271 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast || 02272 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) { 02273 Ptr = cast<Operator>(Ptr)->getOperand(0); 02274 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 02275 if (GA->mayBeOverridden()) 02276 break; 02277 Ptr = GA->getAliasee(); 02278 } else { 02279 break; 02280 } 02281 } 02282 Offset = ByteOffset.getSExtValue(); 02283 return Ptr; 02284 } 02285 02286 02287 /// getConstantStringInfo - This function computes the length of a 02288 /// null-terminated C string pointed to by V. If successful, it returns true 02289 /// and returns the string in Str. If unsuccessful, it returns false. 02290 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 02291 uint64_t Offset, bool TrimAtNul) { 02292 assert(V); 02293 02294 // Look through bitcast instructions and geps. 02295 V = V->stripPointerCasts(); 02296 02297 // If the value is a GEP instructionor constant expression, treat it as an 02298 // offset. 02299 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 02300 // Make sure the GEP has exactly three arguments. 02301 if (GEP->getNumOperands() != 3) 02302 return false; 02303 02304 // Make sure the index-ee is a pointer to array of i8. 02305 PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType()); 02306 ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType()); 02307 if (!AT || !AT->getElementType()->isIntegerTy(8)) 02308 return false; 02309 02310 // Check to make sure that the first operand of the GEP is an integer and 02311 // has value 0 so that we are sure we're indexing into the initializer. 02312 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); 02313 if (!FirstIdx || !FirstIdx->isZero()) 02314 return false; 02315 02316 // If the second index isn't a ConstantInt, then this is a variable index 02317 // into the array. If this occurs, we can't say anything meaningful about 02318 // the string. 02319 uint64_t StartIdx = 0; 02320 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 02321 StartIdx = CI->getZExtValue(); 02322 else 02323 return false; 02324 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset); 02325 } 02326 02327 // The GEP instruction, constant or instruction, must reference a global 02328 // variable that is a constant and is initialized. The referenced constant 02329 // initializer is the array that we'll use for optimization. 02330 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 02331 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 02332 return false; 02333 02334 // Handle the all-zeros case 02335 if (GV->getInitializer()->isNullValue()) { 02336 // This is a degenerate case. The initializer is constant zero so the 02337 // length of the string must be zero. 
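    // (Illustrative note, not in the original source: for
    //   @g = private constant [8 x i8] zeroinitializer
    // the first byte is already the terminating NUL, so the extracted string
    // is empty whatever Offset was requested.)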
02338 Str = ""; 02339 return true; 02340 } 02341 02342 // Must be a Constant Array 02343 const ConstantDataArray *Array = 02344 dyn_cast<ConstantDataArray>(GV->getInitializer()); 02345 if (!Array || !Array->isString()) 02346 return false; 02347 02348 // Get the number of elements in the array 02349 uint64_t NumElts = Array->getType()->getArrayNumElements(); 02350 02351 // Start out with the entire array in the StringRef. 02352 Str = Array->getAsString(); 02353 02354 if (Offset > NumElts) 02355 return false; 02356 02357 // Skip over 'offset' bytes. 02358 Str = Str.substr(Offset); 02359 02360 if (TrimAtNul) { 02361 // Trim off the \0 and anything after it. If the array is not nul 02362 // terminated, we just return the whole end of string. The client may know 02363 // some other way that the string is length-bound. 02364 Str = Str.substr(0, Str.find('\0')); 02365 } 02366 return true; 02367 } 02368 02369 // These next two are very similar to the above, but also look through PHI 02370 // nodes. 02371 // TODO: See if we can integrate these two together. 02372 02373 /// GetStringLengthH - If we can compute the length of the string pointed to by 02374 /// the specified pointer, return 'len+1'. If we can't, return 0. 02375 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) { 02376 // Look through noop bitcast instructions. 02377 V = V->stripPointerCasts(); 02378 02379 // If this is a PHI node, there are two cases: either we have already seen it 02380 // or we haven't. 02381 if (PHINode *PN = dyn_cast<PHINode>(V)) { 02382 if (!PHIs.insert(PN)) 02383 return ~0ULL; // already in the set. 02384 02385 // If it was new, see if all the input strings are the same length. 02386 uint64_t LenSoFar = ~0ULL; 02387 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 02388 uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs); 02389 if (Len == 0) return 0; // Unknown length -> unknown. 02390 02391 if (Len == ~0ULL) continue; 02392 02393 if (Len != LenSoFar && LenSoFar != ~0ULL) 02394 return 0; // Disagree -> unknown. 02395 LenSoFar = Len; 02396 } 02397 02398 // Success, all agree. 02399 return LenSoFar; 02400 } 02401 02402 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 02403 if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 02404 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs); 02405 if (Len1 == 0) return 0; 02406 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs); 02407 if (Len2 == 0) return 0; 02408 if (Len1 == ~0ULL) return Len2; 02409 if (Len2 == ~0ULL) return Len1; 02410 if (Len1 != Len2) return 0; 02411 return Len1; 02412 } 02413 02414 // Otherwise, see if we can read the string. 02415 StringRef StrData; 02416 if (!getConstantStringInfo(V, StrData)) 02417 return 0; 02418 02419 return StrData.size()+1; 02420 } 02421 02422 /// GetStringLength - If we can compute the length of the string pointed to by 02423 /// the specified pointer, return 'len+1'. If we can't, return 0. 02424 uint64_t llvm::GetStringLength(Value *V) { 02425 if (!V->getType()->isPointerTy()) return 0; 02426 02427 SmallPtrSet<PHINode*, 32> PHIs; 02428 uint64_t Len = GetStringLengthH(V, PHIs); 02429 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 02430 // an empty string as a length. 02431 return Len == ~0ULL ? 
1 : Len; 02432 } 02433 02434 Value * 02435 llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) { 02436 if (!V->getType()->isPointerTy()) 02437 return V; 02438 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 02439 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 02440 V = GEP->getPointerOperand(); 02441 } else if (Operator::getOpcode(V) == Instruction::BitCast || 02442 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 02443 V = cast<Operator>(V)->getOperand(0); 02444 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 02445 if (GA->mayBeOverridden()) 02446 return V; 02447 V = GA->getAliasee(); 02448 } else { 02449 // See if InstructionSimplify knows any relevant tricks. 02450 if (Instruction *I = dyn_cast<Instruction>(V)) 02451 // TODO: Acquire a DominatorTree and AssumptionTracker and use them. 02452 if (Value *Simplified = SimplifyInstruction(I, TD, nullptr)) { 02453 V = Simplified; 02454 continue; 02455 } 02456 02457 return V; 02458 } 02459 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 02460 } 02461 return V; 02462 } 02463 02464 void 02465 llvm::GetUnderlyingObjects(Value *V, 02466 SmallVectorImpl<Value *> &Objects, 02467 const DataLayout *TD, 02468 unsigned MaxLookup) { 02469 SmallPtrSet<Value *, 4> Visited; 02470 SmallVector<Value *, 4> Worklist; 02471 Worklist.push_back(V); 02472 do { 02473 Value *P = Worklist.pop_back_val(); 02474 P = GetUnderlyingObject(P, TD, MaxLookup); 02475 02476 if (!Visited.insert(P)) 02477 continue; 02478 02479 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 02480 Worklist.push_back(SI->getTrueValue()); 02481 Worklist.push_back(SI->getFalseValue()); 02482 continue; 02483 } 02484 02485 if (PHINode *PN = dyn_cast<PHINode>(P)) { 02486 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 02487 Worklist.push_back(PN->getIncomingValue(i)); 02488 continue; 02489 } 02490 02491 Objects.push_back(P); 02492 } while (!Worklist.empty()); 02493 } 02494 02495 /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer 02496 /// are lifetime markers. 02497 /// 02498 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 02499 for (const User *U : V->users()) { 02500 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 02501 if (!II) return false; 02502 02503 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 02504 II->getIntrinsicID() != Intrinsic::lifetime_end) 02505 return false; 02506 } 02507 return true; 02508 } 02509 02510 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 02511 const DataLayout *TD) { 02512 const Operator *Inst = dyn_cast<Operator>(V); 02513 if (!Inst) 02514 return false; 02515 02516 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 02517 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 02518 if (C->canTrap()) 02519 return false; 02520 02521 switch (Inst->getOpcode()) { 02522 default: 02523 return true; 02524 case Instruction::UDiv: 02525 case Instruction::URem: 02526 // x / y is undefined if y == 0, but calculations like x / 3 are safe. 
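    // (Illustrative note, not in the original source: 'udiv i32 %x, 3' is
    // always safe to hoist, whereas 'udiv i32 %x, %y' is only safe when the
    // call below can prove %y != 0.)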
02527 return isKnownNonZero(Inst->getOperand(1), TD); 02528 case Instruction::SDiv: 02529 case Instruction::SRem: { 02530 Value *Op = Inst->getOperand(1); 02531 // x / y is undefined if y == 0 02532 if (!isKnownNonZero(Op, TD)) 02533 return false; 02534 // x / y might be undefined if y == -1 02535 unsigned BitWidth = getBitWidth(Op->getType(), TD); 02536 if (BitWidth == 0) 02537 return false; 02538 APInt KnownZero(BitWidth, 0); 02539 APInt KnownOne(BitWidth, 0); 02540 computeKnownBits(Op, KnownZero, KnownOne, TD); 02541 return !!KnownZero; 02542 } 02543 case Instruction::Load: { 02544 const LoadInst *LI = cast<LoadInst>(Inst); 02545 if (!LI->isUnordered() || 02546 // Speculative load may create a race that did not exist in the source. 02547 LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread)) 02548 return false; 02549 return LI->getPointerOperand()->isDereferenceablePointer(TD); 02550 } 02551 case Instruction::Call: { 02552 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 02553 switch (II->getIntrinsicID()) { 02554 // These synthetic intrinsics have no side-effects and just mark 02555 // information about their operands. 02556 // FIXME: There are other no-op synthetic instructions that potentially 02557 // should be considered at least *safe* to speculate... 02558 case Intrinsic::dbg_declare: 02559 case Intrinsic::dbg_value: 02560 return true; 02561 02562 case Intrinsic::bswap: 02563 case Intrinsic::ctlz: 02564 case Intrinsic::ctpop: 02565 case Intrinsic::cttz: 02566 case Intrinsic::objectsize: 02567 case Intrinsic::sadd_with_overflow: 02568 case Intrinsic::smul_with_overflow: 02569 case Intrinsic::ssub_with_overflow: 02570 case Intrinsic::uadd_with_overflow: 02571 case Intrinsic::umul_with_overflow: 02572 case Intrinsic::usub_with_overflow: 02573 return true; 02574 // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set 02575 // errno like libm sqrt would. 02576 case Intrinsic::sqrt: 02577 case Intrinsic::fma: 02578 case Intrinsic::fmuladd: 02579 case Intrinsic::fabs: 02580 return true; 02581 // TODO: some fp intrinsics are marked as having the same error handling 02582 // as libm. They're safe to speculate when they won't error. 02583 // TODO: are convert_{from,to}_fp16 safe? 02584 // TODO: can we list target-specific intrinsics here? 02585 default: break; 02586 } 02587 } 02588 return false; // The called function could have undefined behavior or 02589 // side-effects, even if marked readnone nounwind. 02590 } 02591 case Instruction::VAArg: 02592 case Instruction::Alloca: 02593 case Instruction::Invoke: 02594 case Instruction::PHI: 02595 case Instruction::Store: 02596 case Instruction::Ret: 02597 case Instruction::Br: 02598 case Instruction::IndirectBr: 02599 case Instruction::Switch: 02600 case Instruction::Unreachable: 02601 case Instruction::Fence: 02602 case Instruction::LandingPad: 02603 case Instruction::AtomicRMW: 02604 case Instruction::AtomicCmpXchg: 02605 case Instruction::Resume: 02606 return false; // Misc instructions which have effects 02607 } 02608 } 02609 02610 /// isKnownNonNull - Return true if we know that the specified value is never 02611 /// null. 02612 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) { 02613 // Alloca never returns null, malloc might. 02614 if (isa<AllocaInst>(V)) return true; 02615 02616 // A byval, inalloca, or nonnull argument is never null. 
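  // (Illustrative note, not in the original source: in
  //   define void @f(i8* byval %p, i8* nonnull %q)
  // both %p, which is backed by a caller-allocated copy, and %q, which is
  // explicitly annotated, are known non-null.)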
02617 if (const Argument *A = dyn_cast<Argument>(V)) 02618 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); 02619 02620 // Global values are not null unless extern weak. 02621 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 02622 return !GV->hasExternalWeakLinkage(); 02623 02624 if (ImmutableCallSite CS = V) 02625 if (CS.isReturnNonNull()) 02626 return true; 02627 02628 // operator new never returns null. 02629 if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true)) 02630 return true; 02631 02632 return false; 02633 }
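//===----------------------------------------------------------------------===//
// Illustrative usage sketch (not part of the original ValueTracking.cpp).
// A hedged example of how a client pass might query the public entry points
// declared in ValueTracking.h; V, DL, AT, CxtI and DT stand for objects the
// caller already has, V is assumed to be an i32, and the shorter call forms
// assume the optional parameters default as declared in the header.
//
//   APInt KnownZero(32, 0), KnownOne(32, 0);
//   computeKnownBits(V, KnownZero, KnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
//
//   // Low two bits known zero means V is a multiple of 4.
//   bool MultipleOf4 = MaskedValueIsZero(V, APInt::getLowBitsSet(32, 2), DL);
//
//   // A proven non-zero divisor makes a division by V safe to speculate.
//   bool DivisorIsSafe = isKnownNonZero(V, DL);
//===----------------------------------------------------------------------===//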