//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
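/// For example, if an "inttoptr i64 %v to i8*" matching (Op, Ty) already
/// exists among V's users, it is reused; if it sits at the wrong position,
/// a fresh cast is created at IP and the old cast's uses are redirected to
/// it.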
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
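/// For example, a bitcast of V back to V's own type is dropped entirely, and
/// a ptrtoint of an inttoptr (or vice versa) of the same bit width collapses
/// to the original operand.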
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
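/// For example, if the block already contains "%t = add i64 %a, %b" within a
/// few instructions of the insertion point, a request for (Add, %a, %b)
/// returns %t rather than emitting a second add.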
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
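///
/// For example, factoring 4 out of the addrec {8,+,12} yields {2,+,3} with a
/// zero remainder, while factoring 4 out of {9,+,12} yields {2,+,3} and adds
/// the leftover 1 to Remainder.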
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const DataLayout *DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (DL) {
      // With DataLayout, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without DataLayout, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
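/// For example, the operand list (%x, 3, 2, {0,+,1}) becomes
/// (5, %x, {0,+,1}): ScalarEvolution folds the non-addrec part and the
/// addrec stays at the end.
///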
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
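/// For example, with DataLayout an address computed as ((4 * %i) + %p),
/// where %p has type i32*, is emitted as "getelementptr i32* %p, i64 %i"
/// rather than as ptrtoint, add, and inttoptr instructions.
///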
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = SE.DL
                 ? SE.DL->getIntPtrType(PTy)
                 : Type::getInt64Ty(PTy->getContext());

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.DL) {
        // With DataLayout, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.DL->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without DataLayout, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
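/// For example, if A's header dominates B's header, B is the later of two
/// neighboring loops and is the one returned.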
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is an instruction,
      // use a SCEVUnknown to avoid re-analyzing it.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
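    // A constant power-of-two divisor is strength-reduced to a logical
    // shift right; e.g. a udiv by 8 becomes an lshr by 3.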
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
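/// For example, Base = {(%p + 4),+,8} leaves Base = %p while folding the
/// constant 4 and the recurrence {0,+,8} into Rest.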
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
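///
/// An "ugly" GEP increment has exactly two operands and an i1* or i8*
/// pointer type, e.g. "getelementptr i8* %iv, i64 %step".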
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
         E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
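///
/// For a pointer IV the step is folded into a GEP, e.g. a constant step of 4
/// over an i32* phi becomes roughly "getelementptr i32* %iv, i64 1"; a
/// non-constant step falls back to an i1* (byte-sized) GEP to avoid a
/// multiply inside the loop.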
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                           Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV = IVIncInsertLoop &&
      SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (Normalized->getNoWrapFlags(SCEV::FlagNUW))
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (Normalized->getNoWrapFlags(SCEV::FlagNSW))
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
                                                  nullptr, Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
                                          TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
01305   // all cases. Consider a phi outside the loop whose operand is replaced
01306   // during expansion with the value of the postinc user. Without fundamentally
01307       // changing the way postinc users are tracked, the only remedy is
01308       // inserting an extra IV increment. StepV might fold into PostLoopOffset,
01309       // but hopefully expandCodeFor handles that.
01310       bool useSubtract =
01311         !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
01312       if (useSubtract)
01313         Step = SE.getNegativeSCEV(Step);
01314       Value *StepV;
01315       {
01316         // Expand the step somewhere that dominates the loop header.
01317         BuilderType::InsertPointGuard Guard(Builder);
01318         StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
01319       }
01320       Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
01321     }
01322   }
01323 
01324   // If we have decided to reuse an induction variable of a dominating loop,
01325   // apply any required truncation and/or inversion of the step.
01326   if (TruncTy) {
01327     Type *ResTy = Result->getType();
01328     // Normalize the result type.
01329     if (ResTy != SE.getEffectiveSCEVType(ResTy))
01330       Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
01331     // Truncate the result.
01332     if (TruncTy != Result->getType()) {
01333       Result = Builder.CreateTrunc(Result, TruncTy);
01334       rememberInstruction(Result);
01335     }
01336     // Invert the result.
01337     if (InvertStep) {
01338       Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
01339                                  Result);
01340       rememberInstruction(Result);
01341     }
01342   }
01343 
01344   // Re-apply any non-loop-dominating scale.
01345   if (PostLoopScale) {
01346     assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
01347     Result = InsertNoopCastOfTo(Result, IntTy);
01348     Result = Builder.CreateMul(Result,
01349                                expandCodeFor(PostLoopScale, IntTy));
01350     rememberInstruction(Result);
01351   }
01352 
01353   // Re-apply any non-loop-dominating offset.
01354   if (PostLoopOffset) {
01355     if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
01356       const SCEV *const OffsetArray[1] = { PostLoopOffset };
01357       Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
01358     } else {
01359       Result = InsertNoopCastOfTo(Result, IntTy);
01360       Result = Builder.CreateAdd(Result,
01361                                  expandCodeFor(PostLoopOffset, IntTy));
01362       rememberInstruction(Result);
01363     }
01364   }
01365 
01366   return Result;
01367 }
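// As an illustration (a sketch; %scale and %off are assumed to be values
// that do not dominate the loop header): for {%off,+,%scale}<%loop>, the
// code above expands the core recurrence {0,+,1}<%loop> as an integer IV,
// then re-applies the scale and offset at the builder's insertion point:
//
//   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
//   ...
//   %t = mul i64 %iv, %scale        ; re-applied PostLoopScale
//   %result = add i64 %t, %off      ; re-applied PostLoopOffset
//
// For a pointer-typed expansion, the offset is re-applied with a GEP
// instead of an add.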
01368 
01369 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
01370   if (!CanonicalMode) return expandAddRecExprLiterally(S);
01371 
01372   Type *Ty = SE.getEffectiveSCEVType(S->getType());
01373   const Loop *L = S->getLoop();
01374 
01375   // First check for an existing canonical IV in a suitable type.
01376   PHINode *CanonicalIV = nullptr;
01377   if (PHINode *PN = L->getCanonicalInductionVariable())
01378     if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
01379       CanonicalIV = PN;
01380 
01381   // Rewrite an AddRec in terms of the canonical induction variable, if
01382   // its type is narrower.
01383   if (CanonicalIV &&
01384       SE.getTypeSizeInBits(CanonicalIV->getType()) >
01385       SE.getTypeSizeInBits(Ty)) {
01386     SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
01387     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
01388       NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
01389     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
01390                                        S->getNoWrapFlags(SCEV::FlagNW)));
01391     BasicBlock::iterator NewInsertPt =
01392       std::next(BasicBlock::iterator(cast<Instruction>(V)));
01393     BuilderType::InsertPointGuard Guard(Builder);
01394     while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
01395            isa<LandingPadInst>(NewInsertPt))
01396       ++NewInsertPt;
01397     V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
01398                       NewInsertPt);
01399     return V;
01400   }
01401 
01402   // {X,+,F} --> X + {0,+,F}
01403   if (!S->getStart()->isZero()) {
01404     SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
01405     NewOps[0] = SE.getConstant(Ty, 0);
01406     const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
01407                                         S->getNoWrapFlags(SCEV::FlagNW));
01408 
01409     // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
01410     // comments on expandAddToGEP for details.
01411     const SCEV *Base = S->getStart();
01412     const SCEV *RestArray[1] = { Rest };
01413     // Dig into the expression to find the pointer base for a GEP.
01414     ExposePointerBase(Base, RestArray[0], SE);
01415     // If we found a pointer, expand the AddRec with a GEP.
01416     if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
01417       // Make sure the Base isn't something exotic, such as a multiplied
01418       // or divided pointer value. In those cases, the result type isn't
01419       // actually a pointer type.
01420       if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
01421         Value *StartV = expand(Base);
01422         assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
01423         return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
01424       }
01425     }
01426 
01427     // Just do a normal add. Pre-expand the operands to suppress folding.
01428     return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
01429                                 SE.getUnknown(expand(Rest))));
01430   }
01431 
01432   // If we don't yet have a canonical IV, create one.
01433   if (!CanonicalIV) {
01434     // Create and insert the PHI node for the induction variable in the
01435     // specified loop.
01436     BasicBlock *Header = L->getHeader();
01437     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
01438     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
01439                                   Header->begin());
01440     rememberInstruction(CanonicalIV);
01441 
01442     SmallSet<BasicBlock *, 4> PredSeen;
01443     Constant *One = ConstantInt::get(Ty, 1);
01444     for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
01445       BasicBlock *HP = *HPI;
01446       if (!PredSeen.insert(HP)) {
01447         // There must be an incoming value for each predecessor, even the
01448         // duplicates!
01449         CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
01450         continue;
01451       }
01452 
01453       if (L->contains(HP)) {
01454         // Insert a unit add instruction right before the terminator
01455         // corresponding to the back-edge.
01456         Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
01457                                                      "indvar.next",
01458                                                      HP->getTerminator());
01459         Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
01460         rememberInstruction(Add);
01461         CanonicalIV->addIncoming(Add, HP);
01462       } else {
01463         CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
01464       }
01465     }
01466   }
01467 
01468   // {0,+,1} --> Insert a canonical induction variable into the loop!
01469   if (S->isAffine() && S->getOperand(1)->isOne()) {
01470     assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
01471            "IVs with types different from the canonical IV should "
01472            "already have been handled!");
01473     return CanonicalIV;
01474   }
01475 
01476   // {0,+,F} --> {0,+,1} * F
01477 
01478   // If this is a simple linear addrec, emit it now as a special case.
01479   if (S->isAffine())    // {0,+,F} --> i*F
01480     return
01481       expand(SE.getTruncateOrNoop(
01482         SE.getMulExpr(SE.getUnknown(CanonicalIV),
01483                       SE.getNoopOrAnyExtend(S->getOperand(1),
01484                                             CanonicalIV->getType())),
01485         Ty));
01486 
01487   // If this is a chain of recurrences, turn it into a closed form, using the
01488   // folders, then expandCodeFor the closed form.  This allows the folders to
01489   // simplify the expression without having to build a bunch of special code
01490   // into this folder.
01491   const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
01492 
01493   // Promote S up to the canonical IV type, if the cast is foldable.
01494   const SCEV *NewS = S;
01495   const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
01496   if (isa<SCEVAddRecExpr>(Ext))
01497     NewS = Ext;
01498 
01499   const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
01500   //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";
01501 
01502   // Truncate the result down to the original type, if needed.
01503   const SCEV *T = SE.getTruncateOrNoop(V, Ty);
01504   return expand(T);
01505 }
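// As a worked example of the closed-form path above: evaluating the
// quadratic addrec {0,+,1,+,1}<%loop> at the canonical IV i gives, by
// binomial expansion, C(i,1) + C(i,2) = i + i*(i-1)/2. The SCEV folders
// simplify that expression, and expand(T) then emits plain arithmetic on
// the canonical IV. (A sketch of the math only; the exact folded form is
// up to the folders.)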
01506 
01507 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
01508   Type *Ty = SE.getEffectiveSCEVType(S->getType());
01509   Value *V = expandCodeFor(S->getOperand(),
01510                            SE.getEffectiveSCEVType(S->getOperand()->getType()));
01511   Value *I = Builder.CreateTrunc(V, Ty);
01512   rememberInstruction(I);
01513   return I;
01514 }
01515 
01516 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
01517   Type *Ty = SE.getEffectiveSCEVType(S->getType());
01518   Value *V = expandCodeFor(S->getOperand(),
01519                            SE.getEffectiveSCEVType(S->getOperand()->getType()));
01520   Value *I = Builder.CreateZExt(V, Ty);
01521   rememberInstruction(I);
01522   return I;
01523 }
01524 
01525 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
01526   Type *Ty = SE.getEffectiveSCEVType(S->getType());
01527   Value *V = expandCodeFor(S->getOperand(),
01528                            SE.getEffectiveSCEVType(S->getOperand()->getType()));
01529   Value *I = Builder.CreateSExt(V, Ty);
01530   rememberInstruction(I);
01531   return I;
01532 }
01533 
01534 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
01535   Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
01536   Type *Ty = LHS->getType();
01537   for (int i = S->getNumOperands()-2; i >= 0; --i) {
01538     // In the case of mixed integer and pointer types, do the
01539     // rest of the comparisons as integer.
01540     if (S->getOperand(i)->getType() != Ty) {
01541       Ty = SE.getEffectiveSCEVType(Ty);
01542       LHS = InsertNoopCastOfTo(LHS, Ty);
01543     }
01544     Value *RHS = expandCodeFor(S->getOperand(i), Ty);
01545     Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
01546     rememberInstruction(ICmp);
01547     Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
01548     rememberInstruction(Sel);
01549     LHS = Sel;
01550   }
01551   // In the case of mixed integer and pointer types, cast the
01552   // final result back to the pointer type.
01553   if (LHS->getType() != S->getType())
01554     LHS = InsertNoopCastOfTo(LHS, S->getType());
01555   return LHS;
01556 }
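// For example, smax(%a, %b, %c) expands to a chain of compares and selects,
// built from the last operand backwards (a sketch; names are illustrative):
//
//   %cmp = icmp sgt i32 %c, %b
//   %smax = select i1 %cmp, i32 %c, i32 %b
//   %cmp1 = icmp sgt i32 %smax, %a
//   %smax1 = select i1 %cmp1, i32 %smax, i32 %a
//
// visitUMaxExpr below is identical except that it uses unsigned compares.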
01557 
01558 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
01559   Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
01560   Type *Ty = LHS->getType();
01561   for (int i = S->getNumOperands()-2; i >= 0; --i) {
01562     // In the case of mixed integer and pointer types, do the
01563     // rest of the comparisons as integer.
01564     if (S->getOperand(i)->getType() != Ty) {
01565       Ty = SE.getEffectiveSCEVType(Ty);
01566       LHS = InsertNoopCastOfTo(LHS, Ty);
01567     }
01568     Value *RHS = expandCodeFor(S->getOperand(i), Ty);
01569     Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
01570     rememberInstruction(ICmp);
01571     Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
01572     rememberInstruction(Sel);
01573     LHS = Sel;
01574   }
01575   // In the case of mixed integer and pointer types, cast the
01576   // final result back to the pointer type.
01577   if (LHS->getType() != S->getType())
01578     LHS = InsertNoopCastOfTo(LHS, S->getType());
01579   return LHS;
01580 }
01581 
01582 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
01583                                    Instruction *IP) {
01584   Builder.SetInsertPoint(IP->getParent(), IP);
01585   return expandCodeFor(SH, Ty);
01586 }
01587 
01588 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
01589   // Expand the code for this SCEV.
01590   Value *V = expand(SH);
01591   if (Ty) {
01592     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
01593            "non-trivial casts should be done with the SCEVs directly!");
01594     V = InsertNoopCastOfTo(V, Ty);
01595   }
01596   return V;
01597 }
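// Example client usage (a minimal sketch, assuming a pass that already has
// ScalarEvolution &SE, a Value *V with a computable SCEV, and an
// Instruction *IP that the expanded value must dominate):
//
//   SCEVExpander Expander(SE, "expander");
//   const SCEV *Expr = SE.getSCEV(V);
//   if (isSafeToExpand(Expr, SE)) {
//     Value *NewV = Expander.expandCodeFor(Expr, V->getType(), IP);
//     ...
//   }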
01598 
01599 Value *SCEVExpander::expand(const SCEV *S) {
01600   // Compute an insertion point for this SCEV object. Hoist the instructions
01601   // as far out in the loop nest as possible.
01602   Instruction *InsertPt = Builder.GetInsertPoint();
01603   for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
01604        L = L->getParentLoop())
01605     if (SE.isLoopInvariant(S, L)) {
01606       if (!L) break;
01607       if (BasicBlock *Preheader = L->getLoopPreheader())
01608         InsertPt = Preheader->getTerminator();
01609       else {
01610         // LSR sets the insertion point for AddRec start/step values to the
01611         // block start to simplify value reuse, even though it's an invalid
01612         // position. SCEVExpander must correct for this in all cases.
01613         InsertPt = L->getHeader()->getFirstInsertionPt();
01614       }
01615     } else {
01616       // If the SCEV is computable at this level, insert it into the header
01617       // after the PHIs (and after any other instructions that we've inserted
01618       // there) so that it is guaranteed to dominate any user inside the loop.
01619       if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
01620         InsertPt = L->getHeader()->getFirstInsertionPt();
01621       while (InsertPt != Builder.GetInsertPoint()
01622              && (isInsertedInstruction(InsertPt)
01623                  || isa<DbgInfoIntrinsic>(InsertPt))) {
01624         InsertPt = std::next(BasicBlock::iterator(InsertPt));
01625       }
01626       break;
01627     }
01628 
01629   // Check to see if we already expanded this here.
01630   std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
01631     I = InsertedExpressions.find(std::make_pair(S, InsertPt));
01632   if (I != InsertedExpressions.end())
01633     return I->second;
01634 
01635   BuilderType::InsertPointGuard Guard(Builder);
01636   Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
01637 
01638   // Expand the expression into instructions.
01639   Value *V = visit(S);
01640 
01641   // Remember the expanded value for this SCEV at this location.
01642   //
01643   // This is independent of PostIncLoops. The mapped value simply materializes
01644   // the expression at this insertion point. If the mapped value happened to be
01645   // a postinc expansion, it could be reused by a non-postinc user, but only if
01646   // its insertion point was already at the head of the loop.
01647   InsertedExpressions[std::make_pair(S, InsertPt)] = V;
01648   return V;
01649 }
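// For example, if the builder's insertion point is inside %loop but S only
// involves values defined outside it, the instructions are emitted at the
// preheader's terminator instead, and the (S, InsertPt) pair is cached in
// InsertedExpressions so later requests at the same point reuse the same
// value. (Illustrative; the exact hoisting depth depends on LoopInfo.)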
01650 
01651 void SCEVExpander::rememberInstruction(Value *I) {
01652   if (!PostIncLoops.empty())
01653     InsertedPostIncValues.insert(I);
01654   else
01655     InsertedValues.insert(I);
01656 }
01657 
01658 /// getOrInsertCanonicalInductionVariable - This method returns the
01659 /// canonical induction variable of the specified type for the specified
01660 /// loop (inserting one if there is none).  A canonical induction variable
01661 /// starts at zero and steps by one on each iteration.
01662 PHINode *
01663 SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
01664                                                     Type *Ty) {
01665   assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
01666 
01667   // Build a SCEV for {0,+,1}<L>.
01668   // Conservatively use FlagAnyWrap for now.
01669   const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
01670                                    SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
01671 
01672   // Emit code for it.
01673   BuilderType::InsertPointGuard Guard(Builder);
01674   PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
01675                                            L->getHeader()->begin()));
01676 
01677   return V;
01678 }
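// Example (a sketch, assuming a transform with ScalarEvolution &SE and a
// loop L whose header is available):
//
//   SCEVExpander Expander(SE, "indvar");
//   Type *I64Ty = Type::getInt64Ty(L->getHeader()->getContext());
//   PHINode *IndVar = Expander.getOrInsertCanonicalInductionVariable(L, I64Ty);
//   // IndVar is {0,+,1}<L>: zero on entry, incremented by one per iteration.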
01679 
01680 /// replaceCongruentIVs - Check for congruent phis in this loop header and
01681 /// replace them with their most canonical representative. Return the number of
01682 /// phis eliminated.
01683 ///
01684 /// This does not depend on any SCEVExpander state but should be used in
01685 /// the same context that SCEVExpander is used.
01686 unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
01687                                            SmallVectorImpl<WeakVH> &DeadInsts,
01688                                            const TargetTransformInfo *TTI) {
01689   // Find integer phis in order of increasing width.
01690   SmallVector<PHINode*, 8> Phis;
01691   for (BasicBlock::iterator I = L->getHeader()->begin();
01692        PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
01693     Phis.push_back(Phi);
01694   }
01695   if (TTI)
01696     std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
01697       // Put pointers at the back, and make sure "pointer < pointer" is false.
01698       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
01699         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
01700       return RHS->getType()->getPrimitiveSizeInBits() <
01701              LHS->getType()->getPrimitiveSizeInBits();
01702     });
01703 
01704   unsigned NumElim = 0;
01705   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
01706   // Process phis from wide to narrow. Map wide phis to their truncated form
01707   // so narrow phis can reuse them.
01708   for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
01709          PEnd = Phis.end(); PIter != PEnd; ++PIter) {
01710     PHINode *Phi = *PIter;
01711 
01712     // Fold constant phis. They may be congruent to other constant phis and
01713     // would confuse the logic below that expects proper IVs.
01714     if (Value *V = SimplifyInstruction(Phi, SE.DL, SE.TLI, SE.DT, SE.AT)) {
01715       Phi->replaceAllUsesWith(V);
01716       DeadInsts.push_back(Phi);
01717       ++NumElim;
01718       DEBUG_WITH_TYPE(DebugType, dbgs()
01719                       << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
01720       continue;
01721     }
01722 
01723     if (!SE.isSCEVable(Phi->getType()))
01724       continue;
01725 
01726     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
01727     if (!OrigPhiRef) {
01728       OrigPhiRef = Phi;
01729       if (Phi->getType()->isIntegerTy() && TTI
01730           && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
01731         // This phi can be freely truncated to the narrowest phi type. Map the
01732         // truncated expression to it so it will be reused for narrow types.
01733         const SCEV *TruncExpr =
01734           SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
01735         ExprToIVMap[TruncExpr] = Phi;
01736       }
01737       continue;
01738     }
01739 
01740     // Replacing a pointer phi with an integer phi or vice-versa doesn't make
01741     // sense.
01742     if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
01743       continue;
01744 
01745     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
01746       Instruction *OrigInc =
01747         cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
01748       Instruction *IsomorphicInc =
01749         cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
01750 
01751       // If this phi has the same width but is more canonical, replace the
01752       // original with it. As part of the "more canonical" determination,
01753       // respect a prior decision to use an IV chain.
01754       if (OrigPhiRef->getType() == Phi->getType()
01755           && !(ChainedPhis.count(Phi)
01756                || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
01757           && (ChainedPhis.count(Phi)
01758               || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
01759         std::swap(OrigPhiRef, Phi);
01760         std::swap(OrigInc, IsomorphicInc);
01761       }
01762       // Replacing the congruent phi is sufficient because acyclic redundancy
01763       // elimination (CSE/GVN) should handle the rest. However, once SCEV proves
01764       // that a phi is congruent, it's often the head of an IV user cycle that
01765       // is isomorphic with the original phi. It's worth eagerly cleaning up the
01766       // common case of a single IV increment so that DeleteDeadPHIs can remove
01767       // cycles that had postinc uses.
01768       const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
01769                                                    IsomorphicInc->getType());
01770       if (OrigInc != IsomorphicInc
01771           && TruncExpr == SE.getSCEV(IsomorphicInc)
01772           && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
01773               || hoistIVInc(OrigInc, IsomorphicInc))) {
01774         DEBUG_WITH_TYPE(DebugType, dbgs()
01775                         << "INDVARS: Eliminated congruent iv.inc: "
01776                         << *IsomorphicInc << '\n');
01777         Value *NewInc = OrigInc;
01778         if (OrigInc->getType() != IsomorphicInc->getType()) {
01779           Instruction *IP = isa<PHINode>(OrigInc)
01780             ? (Instruction*)L->getHeader()->getFirstInsertionPt()
01781             : OrigInc->getNextNode();
01782           IRBuilder<> Builder(IP);
01783           Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
01784           NewInc = Builder.
01785             CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
01786         }
01787         IsomorphicInc->replaceAllUsesWith(NewInc);
01788         DeadInsts.push_back(IsomorphicInc);
01789       }
01790     }
01791     DEBUG_WITH_TYPE(DebugType, dbgs()
01792                     << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
01793     ++NumElim;
01794     Value *NewIV = OrigPhiRef;
01795     if (OrigPhiRef->getType() != Phi->getType()) {
01796       IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
01797       Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
01798       NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
01799     }
01800     Phi->replaceAllUsesWith(NewIV);
01801     DeadInsts.push_back(Phi);
01802   }
01803   return NumElim;
01804 }
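// For example, if a loop header contains both %i.64 = {0,+,1}<%loop> (i64)
// and %i.32 = {0,+,1}<%loop> (i32), the two phis are congruent after
// truncation: the wide phi is kept as the representative, uses of the
// narrow phi are rewritten to a trunc of the wide one, and the narrow phi
// and its increment are queued on DeadInsts. (A sketch; which phi survives
// depends on the TTI-driven sort and the canonicality checks above.)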
01805 
01806 namespace {
01807 // Search for a SCEV subexpression that is not safe to expand.  Any expression
01808 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
01809 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
01810 // instruction, but the important thing is that we prove the denominator is
01811 // nonzero before expansion.
01812 //
01813 // IVUsers already checks that IV-derived expressions are safe. So this check is
01814 // only needed when the expression includes some subexpression that is not IV
01815 // derived.
01816 //
01817 // Currently, we only allow division by a nonzero constant here. If this is
01818 // inadequate, we could easily allow division by SCEVUnknown by using
01819 // ValueTracking to check isKnownNonZero().
01820 //
01821 // We cannot generally expand recurrences unless the step dominates the loop
01822 // header. The expander handles the special case of affine recurrences by
01823 // scaling the recurrence outside the loop, but this technique isn't generally
01824 // applicable. Expanding a nested recurrence outside a loop requires computing
01825 // binomial coefficients. This could be done, but the recurrence has to be in a
01826 // perfectly reduced form, which can't be guaranteed.
01827 struct SCEVFindUnsafe {
01828   ScalarEvolution &SE;
01829   bool IsUnsafe;
01830 
01831   SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
01832 
01833   bool follow(const SCEV *S) {
01834     if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
01835       const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
01836       if (!SC || SC->getValue()->isZero()) {
01837         IsUnsafe = true;
01838         return false;
01839       }
01840     }
01841     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
01842       const SCEV *Step = AR->getStepRecurrence(SE);
01843       if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
01844         IsUnsafe = true;
01845         return false;
01846       }
01847     }
01848     return true;
01849   }
01850   bool isDone() const { return IsUnsafe; }
01851 };
01852 }
01853 
01854 namespace llvm {
01855 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
01856   SCEVFindUnsafe Search(SE);
01857   visitAll(S, Search);
01858   return !Search.IsUnsafe;
01859 }
01860 }
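// Example uses (sketches; Expr, Expander, Ty, and InsertPt are assumed to
// be in scope): a udiv whose denominator is a SCEVUnknown is rejected, as
// is a non-affine recurrence whose step does not dominate its loop header.
// Clients typically guard expansion accordingly:
//
//   if (isSafeToExpand(Expr, SE))
//     Result = Expander.expandCodeFor(Expr, Ty, InsertPt);
//   else
//     return false; // give up rather than speculate a possibly-trapping udiv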