LLVM API Documentation

TargetLowering.h
Go to the documentation of this file.
00001 //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 ///
00010 /// \file
00011 /// This file describes how to lower LLVM code to machine code.  This has two
00012 /// main components:
00013 ///
00014 ///  1. Which ValueTypes are natively supported by the target.
00015 ///  2. Which operations are supported for supported ValueTypes.
00016 ///  3. Cost thresholds for alternative implementations of certain operations.
00017 ///
00018 /// In addition it has a few other components, like information about FP
00019 /// immediates.
00020 ///
00021 //===----------------------------------------------------------------------===//
00022 
00023 #ifndef LLVM_TARGET_TARGETLOWERING_H
00024 #define LLVM_TARGET_TARGETLOWERING_H
00025 
00026 #include "llvm/ADT/DenseMap.h"
00027 #include "llvm/CodeGen/DAGCombine.h"
00028 #include "llvm/CodeGen/RuntimeLibcalls.h"
00029 #include "llvm/CodeGen/SelectionDAGNodes.h"
00030 #include "llvm/IR/Attributes.h"
00031 #include "llvm/IR/CallSite.h"
00032 #include "llvm/IR/CallingConv.h"
00033 #include "llvm/IR/InlineAsm.h"
00034 #include "llvm/IR/Instructions.h"
00035 #include "llvm/IR/IRBuilder.h"
00036 #include "llvm/MC/MCRegisterInfo.h"
00037 #include "llvm/Target/TargetCallingConv.h"
00038 #include "llvm/Target/TargetMachine.h"
00039 #include <climits>
00040 #include <map>
00041 #include <vector>
00042 
00043 namespace llvm {
00044   class CallInst;
00045   class CCState;
00046   class FastISel;
00047   class FunctionLoweringInfo;
00048   class ImmutableCallSite;
00049   class IntrinsicInst;
00050   class MachineBasicBlock;
00051   class MachineFunction;
00052   class MachineInstr;
00053   class MachineJumpTableInfo;
00054   class Mangler;
00055   class MCContext;
00056   class MCExpr;
00057   class MCSymbol;
00058   template<typename T> class SmallVectorImpl;
00059   class DataLayout;
00060   class TargetRegisterClass;
00061   class TargetLibraryInfo;
00062   class TargetLoweringObjectFile;
00063   class Value;
00064 
  /// Scheduling heuristic preferences a target can request from the
  /// SelectionDAG instruction scheduler.
  namespace Sched {
    enum Preference {
      None,             // No preference.
      Source,           // Follow source order.
      RegPressure,      // Scheduling for lowest register pressure.
      Hybrid,           // Scheduling for both latency and register pressure.
      ILP,              // Scheduling for ILP in low register pressure mode.
      VLIW              // Scheduling for VLIW targets.
    };
  }
00075 
00076 /// This base class for TargetLowering contains the SelectionDAG-independent
00077 /// parts that can be used from the rest of CodeGen.
00078 class TargetLoweringBase {
00079   TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
00080   void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
00081 
00082 public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };
00091 
  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };
00104 
  /// LegalizeKind holds the legalization kind (first) that needs to happen to
  /// EVT (second) in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
00108 
  /// Enum that describes how the target represents true/false values in types
  /// wider than i1 (see getBooleanContents()).
  enum BooleanContent {
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };
00115 
  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };
00124 
00125   static ISD::NodeType getExtendForContent(BooleanContent Content) {
00126     switch (Content) {
00127     case UndefinedBooleanContent:
00128       // Extend by adding rubbish bits.
00129       return ISD::ANY_EXTEND;
00130     case ZeroOrOneBooleanContent:
00131       // Extend by adding zero bits.
00132       return ISD::ZERO_EXTEND;
00133     case ZeroOrNegativeOneBooleanContent:
00134       // Extend by copying the sign bit.
00135       return ISD::SIGN_EXTEND;
00136     }
00137     llvm_unreachable("Invalid content kind");
00138   }
00139 
  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM,
                              const TargetLoweringObjectFile *TLOF);
  virtual ~TargetLoweringBase();

protected:
  /// \brief Initialize all of the actions to default values.
  void initActions();

public:
  /// Return the target machine this lowering was created for.
  const TargetMachine &getTargetMachine() const { return TM; }
  const DataLayout *getDataLayout() const { return DL; }
  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

  /// Return true if the target's byte order is big-endian.
  bool isBigEndian() const { return !IsLittleEndian; }
  /// Return true if the target's byte order is little-endian.
  bool isLittleEndian() const { return IsLittleEndian; }
00156 
  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
  /// Return the size in bits of a pointer in address space \p AS.
  unsigned getPointerSizeInBits(uint32_t AS = 0) const;
  /// Return the size in bits of the given pointer type \p Ty.
  unsigned getPointerTypeSizeInBits(Type *Ty) const;
  /// Return the type to use for the shift amount of a scalar shift, given the
  /// type being shifted.
  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;

  /// Return the type to use for a shift amount, given the type being shifted.
  EVT getShiftAmountTy(EVT LHSTy) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy() const {
    return getPointerTy();
  }
00173 
  /// Return true if the select operation is expensive for this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  /// Return true if the target supports the given kind of select. The default
  /// implementation optimistically reports all kinds as supported.
  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
00188 
00189   /// Return the preferred vector type legalization action.
00190   virtual TargetLoweringBase::LegalizeTypeAction
00191   getPreferredVectorAction(EVT VT) const {
00192     // The default action for one element vectors is to scalarize
00193     if (VT.getVectorNumElements() == 1)
00194       return TypeScalarizeVector;
00195     // The default action for other vectors is to promote
00196     return TypePromoteInteger;
00197   }
00198 
00199   // There are two general methods for expanding a BUILD_VECTOR node:
00200   //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
00201   //     them together.
00202   //  2. Build the vector on the stack and then load it.
00203   // If this function returns true, then method (1) will be used, subject to
00204   // the constraint that all of the necessary shuffles are legal (as determined
00205   // by isShuffleMaskLegal). If this function returns false, then method (2) is
00206   // always used. The vector type, and the number of defined values, are
00207   // provided.
00208   virtual bool
00209   shouldExpandBuildVectorWithShuffles(EVT /* VT */,
00210                                       unsigned DefinedValues) const {
00211     return DefinedValues < 3;
00212   }
00213 
  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  /// NOTE(review): keys/values appear to be bit widths (slow -> fast) given the
  /// member name — confirm against the setter / callers.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if pow2 sdiv is cheaper than a chain of sra/srl/add/sra.
  bool isPow2SDivCheap() const { return Pow2SDivIsCheap; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }
00239 
  /// isLoadBitCastBeneficial() - Return true if the following transform
  /// is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations in
  /// dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
    return true;
  }
00250 
  /// \brief Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #imm-with-one-bit-set;
  ///   %icmpResult = icmp %andResult, 0
  ///   br i1 %icmpResult, label %dest1, label %dest2
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   brOnBitSet %register, #bitNumber, dest
  /// \endcode
  bool isMaskAndBranchFoldingLegal() const {
    return MaskAndBranchFoldingIsLegal;
  }

  /// Return true if the target supports floating point exceptions.
  bool hasFloatingPointExceptions() const {
    return HasFloatingPointExceptions;
  }
00270 
  /// Return the ValueType of the result of SETCC operations.  Also used to
  /// obtain the target's preferred type for the condition operand of SELECT and
  /// BRCOND nodes.  In the case of BRCOND the argument passed is MVT::Other
  /// since there are no other operands to get a type hint from.
  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;
00282 
00283   /// For targets without i1 registers, this gives the nature of the high-bits
00284   /// of boolean values held in types wider than i1.
00285   ///
00286   /// "Boolean values" are special true/false values produced by nodes like
00287   /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
00288   /// Not to be confused with general values promoted from i1.  Some cpus
00289   /// distinguish between vectors of boolean and scalars; the isVec parameter
00290   /// selects between the two kinds.  For example on X86 a scalar boolean should
00291   /// be zero extended from i1, while the elements of a vector of booleans
00292   /// should be sign extended from i1.
00293   ///
00294   /// Some cpus also treat floating point types the same way as they treat
00295   /// vectors instead of the way they treat scalars.
00296   BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
00297     if (isVec)
00298       return BooleanVectorContents;
00299     return isFloat ? BooleanFloatContents : BooleanContents;
00300   }
00301 
00302   BooleanContent getBooleanContents(EVT Type) const {
00303     return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
00304   }
00305 
  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference (or
  /// none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }
00317 
  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  /// NOTE(review): unlike getRegClassFor, there is no non-null assert here, so
  /// callers may receive a null pointer — confirm expected contract.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }
00343 
00344   /// Return true if the target has native support for the specified value type.
00345   /// This means that it has a register that directly holds it without
00346   /// promotions or expansions.
00347   bool isTypeLegal(EVT VT) const {
00348     assert(!VT.isSimple() ||
00349            (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
00350     return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
00351   }
00352 
00353   class ValueTypeActionImpl {
00354     /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
00355     /// that indicates how instruction selection should deal with the type.
00356     uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
00357 
00358   public:
00359     ValueTypeActionImpl() {
00360       std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
00361     }
00362 
00363     LegalizeTypeAction getTypeAction(MVT VT) const {
00364       return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
00365     }
00366 
00367     void setTypeAction(MVT VT, LegalizeTypeAction Action) {
00368       unsigned I = VT.SimpleTy;
00369       ValueTypeActions[I] = Action;
00370     }
00371   };
00372 
  /// Return a const reference to the per-type legalization action table.
  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  /// Variant of the above for simple value types: direct table lookup.
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }
00387 
  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }
00397 
00398   /// For types supported by the target, this is an identity function.  For
00399   /// types that must be expanded (i.e. integer types that are larger than the
00400   /// largest integer register or illegal floating point types), this returns
00401   /// the largest legal type it will be expanded to.
00402   EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
00403     assert(!VT.isVector());
00404     while (true) {
00405       switch (getTypeAction(Context, VT)) {
00406       case TypeLegal:
00407         return VT;
00408       case TypeExpandInteger:
00409         VT = getTypeToTransformTo(Context, VT);
00410         break;
00411       default:
00412         llvm_unreachable("Type is not legal nor is it to be expanded!");
00413       }
00414     }
00415   }
00416 
  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
00429 
  /// Description of a memory-touching intrinsic, filled in by
  /// getTgtMemIntrinsic() so the intrinsic can be lowered to a
  /// MemIntrinsicNode.
  struct IntrinsicInfo {
    unsigned     opc;         // target opcode
    EVT          memVT;       // memory VT
    const Value* ptrVal;      // value representing memory location
    int          offset;      // offset off of ptrVal
    unsigned     size;        // the size of the memory location
                              // (taken from memVT if zero)
    unsigned     align;       // alignment
    bool         vol;         // is volatile?
    bool         readMem;     // reads memory?
    bool         writeMem;    // writes memory?

    // NOTE(review): memVT is intentionally left default-constructed here;
    // presumably targets set it in getTgtMemIntrinsic() — confirm.
    IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
                      vol(false), readMem(false), writeMem(false) {}
  };
00445 
  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }
00461 
  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;
00476 
  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }
00484 
00485   /// Return how this operation should be treated: either it is legal, needs to
00486   /// be promoted to a larger size, needs to be expanded to some other code
00487   /// sequence, or the target has a custom expander for it.
00488   LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
00489     if (VT.isExtended()) return Expand;
00490     // If a target-specific SDNode requires legalization, require the target
00491     // to provide custom legalization for it.
00492     if (Op > array_lengthof(OpActions[0])) return Custom;
00493     unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
00494     return (LegalizeAction)OpActions[I][Op];
00495   }
00496 
00497   /// Return true if the specified operation is legal on this target or can be
00498   /// made legal with custom lowering. This is used to help guide high-level
00499   /// lowering decisions.
00500   bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
00501     return (VT == MVT::Other || isTypeLegal(VT)) &&
00502       (getOperationAction(Op, VT) == Legal ||
00503        getOperationAction(Op, VT) == Custom);
00504   }
00505 
00506   /// Return true if the specified operation is legal on this target or can be
00507   /// made legal using promotion. This is used to help guide high-level lowering
00508   /// decisions.
00509   bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
00510     return (VT == MVT::Other || isTypeLegal(VT)) &&
00511       (getOperationAction(Op, VT) == Legal ||
00512        getOperationAction(Op, VT) == Promote);
00513   }
00514 
00515   /// Return true if the specified operation is illegal on this target or
00516   /// unlikely to be made legal with custom lowering. This is used to help guide
00517   /// high-level lowering decisions.
00518   bool isOperationExpand(unsigned Op, EVT VT) const {
00519     return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
00520   }
00521 
00522   /// Return true if the specified operation is legal on this target.
00523   bool isOperationLegal(unsigned Op, EVT VT) const {
00524     return (VT == MVT::Other || isTypeLegal(VT)) &&
00525            getOperationAction(Op, VT) == Legal;
00526   }
00527 
00528   /// Return how this load with extension should be treated: either it is legal,
00529   /// needs to be promoted to a larger size, needs to be expanded to some other
00530   /// code sequence, or the target has a custom expander for it.
00531   LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
00532     if (VT.isExtended()) return Expand;
00533     unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
00534     assert(ExtType < ISD::LAST_LOADEXT_TYPE && I < MVT::LAST_VALUETYPE &&
00535            "Table isn't big enough!");
00536     return (LegalizeAction)LoadExtActions[I][ExtType];
00537   }
00538 
00539   /// Return true if the specified load with extension is legal on this target.
00540   bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
00541     return VT.isSimple() &&
00542       getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
00543   }
00544 
  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    // Extended types have no table entry; they must be expanded.
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValI][MemI];
  }
00556 
  /// Return true if the specified store with truncation is legal on this
  /// target. Requires the stored value's type to be legal and the memory type
  /// to be simple.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }
00563 
  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    // The load action is packed in the high nibble of the table byte; the
    // store action occupies the low nibble (see getIndexedStoreAction).
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }
00574 
00575   /// Return true if the specified indexed load is legal on this target.
00576   bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
00577     return VT.isSimple() &&
00578       (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
00579        getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
00580   }
00581 
  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    // The store action is packed in the low nibble of the table byte; the
    // load action occupies the high nibble (see getIndexedLoadAction).
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }
00592 
  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
00599 
  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded: each uint32_t packs the
    // 2-bit actions for 16 value types; the low 4 bits of SimpleTy select the
    // 2-bit lane, the remaining bits select the word.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }
00615 
00616   /// Return true if the specified condition code is legal on this target.
00617   bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
00618     return
00619       getCondCodeAction(CC, VT) == Legal ||
00620       getCondCodeAction(CC, VT) == Custom;
00621   }
00622 
00623 
  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    // Otherwise walk up through the simple value types (ordinal order) until
    // we find the first type of the same class that is legal and not itself
    // promoted for this operation.
    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
00648 
  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
  /// operations except for the pointer size.  If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types: substitute the
      // target pointer type's IR type for the pointer element.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                       VTy->getNumElements());
    }
    // All other types map directly.
    return EVT::getEVT(Ty, AllowUnknown);
  }
00672 
00673   /// Return the MVT corresponding to this LLVM type. See getValueType.
00674   MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
00675     return getValueType(Ty, AllowUnknown).getSimpleVT();
00676   }
00677 
  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.  This is the actual alignment, not
  /// its logarithm.  Virtual so targets can impose their own ABI requirements.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;
00682 
  /// Return the type of registers that this ValueType will eventually require.
  /// Simple table lookup; asserts that VT is a valid simple-type index.  The
  /// table is presumably filled in by computeRegisterProperties() — confirm.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }
00688 
00689   /// Return the type of registers that this ValueType will eventually require.
00690   MVT getRegisterType(LLVMContext &Context, EVT VT) const {
00691     if (VT.isSimple()) {
00692       assert((unsigned)VT.getSimpleVT().SimpleTy <
00693                 array_lengthof(RegisterTypeForVT));
00694       return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
00695     }
00696     if (VT.isVector()) {
00697       EVT VT1;
00698       MVT RegisterVT;
00699       unsigned NumIntermediates;
00700       (void)getVectorTypeBreakdown(Context, VT, VT1,
00701                                    NumIntermediates, RegisterVT);
00702       return RegisterVT;
00703     }
00704     if (VT.isInteger()) {
00705       return getRegisterType(Context, getTypeToTransformTo(Context, VT));
00706     }
00707     llvm_unreachable("Unsupported extended type!");
00708   }
00709 
00710   /// Return the number of registers that this ValueType will eventually
00711   /// require.
00712   ///
00713   /// This is one for any types promoted to live in larger registers, but may be
00714   /// more than one for types (like i64) that are split into pieces.  For types
00715   /// like i140, which are first promoted then expanded, it is the number of
00716   /// registers needed to hold all the bits of the original type.  For an i140
00717   /// on a 32 bit machine this means 5 registers.
00718   unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
00719     if (VT.isSimple()) {
00720       assert((unsigned)VT.getSimpleVT().SimpleTy <
00721                 array_lengthof(NumRegistersForVT));
00722       return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
00723     }
00724     if (VT.isVector()) {
00725       EVT VT1;
00726       MVT VT2;
00727       unsigned NumIntermediates;
00728       return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
00729     }
00730     if (VT.isInteger()) {
00731       unsigned BitWidth = VT.getSizeInBits();
00732       unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
00733       return (BitWidth + RegWidth - 1) / RegWidth;
00734     }
00735     llvm_unreachable("Unsupported extended type!");
00736   }
00737 
  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.  The default allows shrinking for every type; targets may
  /// override to opt out.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
00742 
00743   /// When splitting a value of the specified type into parts, does the Lo
00744   /// or Hi part come first?  This usually follows the endianness, except
00745   /// for ppcf128, where the Hi part always comes first.
00746   bool hasBigEndianPartOrdering(EVT VT) const {
00747     return isBigEndian() || VT == MVT::ppcf128;
00748   }
00749 
  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    // TargetDAGCombineArray packs one flag per node type, 8 flags per byte:
    // byte NT>>3, bit NT&7.  Flags are set by setTargetDAGCombine().
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }
00756 
00757   /// \brief Get maximum # of store operations permitted for llvm.memset
00758   ///
00759   /// This function returns the maximum number of store operations permitted
00760   /// to replace a call to llvm.memset. The value is set by the target at the
00761   /// performance threshold for such a replacement. If OptSize is true,
00762   /// return the limit for functions that have OptSize attribute.
00763   unsigned getMaxStoresPerMemset(bool OptSize) const {
00764     return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
00765   }
00766 
00767   /// \brief Get maximum # of store operations permitted for llvm.memcpy
00768   ///
00769   /// This function returns the maximum number of store operations permitted
00770   /// to replace a call to llvm.memcpy. The value is set by the target at the
00771   /// performance threshold for such a replacement. If OptSize is true,
00772   /// return the limit for functions that have OptSize attribute.
00773   unsigned getMaxStoresPerMemcpy(bool OptSize) const {
00774     return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
00775   }
00776 
00777   /// \brief Get maximum # of store operations permitted for llvm.memmove
00778   ///
00779   /// This function returns the maximum number of store operations permitted
00780   /// to replace a call to llvm.memmove. The value is set by the target at the
00781   /// performance threshold for such a replacement. If OptSize is true,
00782   /// return the limit for functions that have OptSize attribute.
00783   unsigned getMaxStoresPerMemmove(bool OptSize) const {
00784     return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
00785   }
00786 
  /// \brief Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// whether the unaligned memory access is "fast" in the last argument by
  /// reference. This is used, for example, in situations where an array
  /// copy/move/set is converted to a sequence of store operations. Its use
  /// helps to ensure that such replacements don't generate code that causes an
  /// alignment error (trap) on the target machine.
  ///
  /// The default implementation conservatively reports no support.
  virtual bool allowsMisalignedMemoryAccesses(EVT,
                                              unsigned AddrSpace = 0,
                                              unsigned Align = 1,
                                              bool * /*Fast*/ = nullptr) const {
    return false;
  }
00802 
  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any
  /// constraint. Similarly if SrcAlign is zero it means there isn't
  /// a need to check it against alignment requirement, probably because the
  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
  /// does not need to be loaded.  It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }
00822 
  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  ///
  /// The default optimistically accepts every type.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
00831 
  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  /// Returns the flag configured via setUseUnderscoreSetJmp() (default false).
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  /// Returns the flag configured via setUseUnderscoreLongJmp() (default false).
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return integer threshold on number of blocks to use jump tables rather
  /// than if sequence.  Configured via setMinimumJumpTableEntries().
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }
00847 
  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to a landing pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200).  Configured via setJumpBufSize().
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
  /// is 0).  Configured via setJumpBufAlignment().
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }
00877 
  /// Return the minimum stack alignment of an argument.  The value is in
  /// log2(bytes), matching setMinStackArgumentAlignment().
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.  The value is in log2(bytes),
  /// matching setMinFunctionAlignment().
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.  The value is in log2(bytes),
  /// matching setPrefFunctionAlignment().
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.  The value is in log2(bytes);
  /// zero means the target does not care (see setPrefLoopAlignment()).
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// Return whether the DAG builder should automatically insert fences and
  /// reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }
00903 
  /// Return true if the target stores stack protector cookies at a fixed offset
  /// in some non-standard address space, and populates the address space and
  /// offset as appropriate.  The default reports no such location and leaves
  /// the out-parameters untouched.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }
00911 
  /// Returns the maximal possible offset which can be used for loads / stores
  /// from the global.  The default of 0 means no offset folding.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  /// Conservative default: no address-space cast is a no-op.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }
00922 
00923   //===--------------------------------------------------------------------===//
00924   /// \name Helpers for TargetTransformInfo implementations
00925   /// @{
00926 
00927   /// Get the ISD node that corresponds to the Instruction class opcode.
00928   int InstructionOpcodeToISD(unsigned Opcode) const;
00929 
00930   /// Estimate the cost of type-legalization and the legalized type.
00931   std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;
00932 
00933   /// @}
00934 
00935   //===--------------------------------------------------------------------===//
00936   /// \name Helpers for atomic expansion.
00937   /// @{
00938 
00939   /// True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional
00940   /// and expand AtomicCmpXchgInst.
00941   virtual bool hasLoadLinkedStoreConditional() const { return false; }
00942 
  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  ///
  /// Targets returning true from hasLoadLinkedStoreConditional() are expected
  /// to override this; the default aborts via llvm_unreachable.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }
00951 
  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  ///
  /// Targets returning true from hasLoadLinkedStoreConditional() are expected
  /// to override this; the default aborts via llvm_unreachable.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }
00958 
  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
  /// RMW and CmpXchg set both IsStore and IsLoad to true.
  /// Backends with !getInsertFencesForAtomic() should keep a no-op here.
  virtual void emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
          bool IsStore, bool IsLoad) const {
    // Default is a no-op; only valid when fences are not requested.
    assert(!getInsertFencesForAtomic());
  }

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass after expanding an
  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
  /// RMW and CmpXchg set both IsStore and IsLoad to true.
  /// Backends with !getInsertFencesForAtomic() should keep a no-op here.
  virtual void emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
          bool IsStore, bool IsLoad) const {
    // Default is a no-op; only valid when fences are not requested.
    assert(!getInsertFencesForAtomic());
  }
00978 
  /// Returns true if the given (atomic) store should be expanded by the
  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  /// Default: no expansion.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }

  /// Returns true if the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass into a load-linked instruction
  /// (through emitLoadLinked()).  Default: no expansion.
  virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }

  /// Returns true if the given AtomicRMW should be expanded by the
  /// IR-level AtomicExpand pass into a loop using LoadLinked/StoreConditional.
  /// Default: no expansion.
  virtual bool shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
    return false;
  }
00995 
00996   //===--------------------------------------------------------------------===//
00997   // TargetLowering Configuration Methods - These methods should be invoked by
00998   // the derived class constructor to configure this object for the target.
00999   //
01000 
01001   /// \brief Reset the operation actions based on target options.
01002   virtual void resetOperationActions() {}
01003 
01004 protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  /// This one-argument form applies the same kind to both integer and
  /// floating point booleans.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  /// This form sets the integer and floating point kinds independently.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type.  See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }
01024 
  /// Specify the target scheduling preference.  Queried elsewhere via the
  /// SchedPreferenceInfo field.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without _.  Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without _.  Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }
01041 
  /// Indicate the number of blocks to generate jump tables rather than if
  /// sequence.  Read back via getMinimumJumpTableEntries().
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception address on entry to a landing pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception typeid on entry to a landing pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }
01065 
  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand sequence of operations into a
  /// separate sequences that increases the amount of flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }
01094 
  /// Tells the code generator whether integer divide is cheap on this target.
  /// When divide is not cheap, the code generator will, where possible,
  /// replace it with an alternate sequence of instructions not containing an
  /// integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
01099   
  /// Tells the code generator that this target supports floating point
  /// exceptions and cares about preserving floating point exception behavior.
  void setHasFloatingPointExceptions(bool FPExceptions = true) {
    HasFloatingPointExceptions = FPExceptions;
  }

  /// Tells the code generator which bitwidths to bypass.
  /// Records that a SlowBitWidth division may be performed at FastBitWidth.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

  /// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
  /// signed divide by power of two; let the target handle it.
  void setPow2SDivIsCheap(bool isCheap = true) { Pow2SDivIsCheap = isCheap; }
01114 
  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    // Track both the (VT, RC) pair and the direct VT -> RC mapping.
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.SimpleTy] = RC;
  }
01123 
  /// Remove all register classes.
  void clearRegisterClasses() {
    // NOTE(review): zeroes exactly MVT::LAST_VALUETYPE entries — assumes
    // RegClassForVT has that many elements; sizeof(RegClassForVT) would be
    // self-maintaining. Confirm against the member declaration.
    memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));

    AvailableRegClasses.clear();
  }
01130 
  /// \brief Remove all operation actions.
  /// Intentionally empty here; provided as a reset hook for derived classes.
  void clearOperationActions() {
  }
01134 
  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.  Call after all addRegisterClass() calls.
  void computeRegisterProperties();
01143 
  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    // One action byte per (type, opcode) pair.
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }
01151 
  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }
01160 
  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }
01169 
  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load action are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }
01183 
  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store action are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }
01197 
  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    /// The lower 4 bits of SimpleTy (VT.SimpleTy & 0xF) select one of sixteen
    /// 2-bit action slots within a 32-bit element, and the remaining upper
    /// bits (VT.SimpleTy >> 4) select which 32-bit element of the second
    /// dimension holds that slot.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
  }
01212 
  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    // Consulted before the auto-promotion search (see PromoteToType lookups).
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }
01220 
  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    // Set bit NT&7 of byte NT>>3; queried by hasTargetDAGCombine().
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
01228 
  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
  /// 0
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// Set the target's minimum function alignment (in log2(bytes))
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// Set the target's preferred function alignment.  This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes))
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// Set the target's preferred loop alignment. Default alignment is zero, it
  /// means the target does not care about loop alignment.  The alignment is
  /// specified in log2(bytes).
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// Set the minimum stack alignment of an argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// Set if the DAG builder should automatically insert fences and reduce the
  /// order of atomic memory operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }
01269 
public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  ///
  /// The default declines for every intrinsic.
  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value*> &/*Ops*/,
                                    Type *&/*AccessTy*/) const {
    return false;
  }
01285 
01286   /// This represents an addressing mode of:
01287   ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
01288   /// If BaseGV is null,  there is no BaseGV.
01289   /// If BaseOffs is zero, there is no base offset.
01290   /// If HasBaseReg is false, there is no base register.
01291   /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
01292   /// no scale.
01293   struct AddrMode {
01294     GlobalValue *BaseGV;
01295     int64_t      BaseOffs;
01296     bool         HasBaseReg;
01297     int64_t      Scale;
01298     AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
01299   };
01300 
  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.  TODO: Handle
  /// pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
01308 
01309   /// \brief Return the cost of the scaling factor used in the addressing mode
01310   /// represented by AM for this target, for a load/store of the specified type.
01311   ///
01312   /// If the AM is supported, the return value must be >= 0.
01313   /// If the AM is not supported, it returns a negative value.
01314   /// TODO: Handle pre/postinc as well.
01315   virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
01316     // Default: assume that any scaling factor used in a legal AM is free.
01317     if (isLegalAddressingMode(AM, Ty)) return 0;
01318     return -1;
01319   }
01320 
  /// Return true if the specified immediate is legal icmp immediate, that is
  /// the target has icmp instructions which can compare a register against the
  /// immediate without having to materialize the immediate into a register.
  /// Default: every immediate is legal.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is legal add immediate, that is the
  /// target has add instructions which can add a register with the immediate
  /// without having to materialize the immediate into a register.
  /// Default: every immediate is legal.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if it's significantly cheaper to shift a vector by a uniform
  /// scalar than by an amount which will vary across each lane. On x86, for
  /// example, there is a "psllw" instruction for the former case, but no simple
  /// instruction for a general "a << b" operation on vectors.
  /// Conservative default: no such advantage.
  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
    return false;
  }
01342 
  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  /// Defaults to false (conservatively assume truncation has a cost).
  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }
01349 
  /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
  /// whether a call is in tail position. Typically this means that both results
  /// would be assigned to the same register or stack slot, but it could mean
  /// the target performs adequate checks of its own before proceeding with the
  /// tail call.  Defaults to false.
  virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }
01358 
  /// Return true if it's free to truncate a value of type VT1 to type VT2
  /// (EVT overload; see the Type* overload for examples).  Defaults to false.
  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }
01362 
  /// Return true if any actual instruction that defines a value of type Ty1
  /// implicitly zero-extends the value to Ty2 in the result register.
  ///
  /// This does not necessarily include registers defined in unknown ways, such
  /// as incoming arguments, or copies from unknown virtual registers. Also, if
  /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
  /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
  /// values implicitly zero-extend the result out to 64 bits.
  /// Defaults to false.
  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }
01374 
  /// Return true if a zero-extension from VT1 to VT2 is free (EVT overload;
  /// see the Type* overload for the full contract).  Defaults to false.
  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }
01378 
  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  /// a = load i64* addr
  /// b = trunc i64 a to i32
  /// c = lshr i64 a, 32
  /// d = trunc i64 c to i32
  /// will be optimized into:
  /// b = load i32* addr1
  /// d = load i32* addr2
  /// Where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(Type * /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }
01404 
  /// EVT overload of hasPairedLoad; same contract as the Type* overload.
  /// Defaults to false.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }
01409 
  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  /// The default implementation delegates to the EVT overload using Val's
  /// value type.
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }
01416 
01417   /// Return true if an fneg operation is free to the point where it is never
01418   /// worthwhile to replace it with a bitwise operation.
01419   virtual bool isFNegFree(EVT VT) const {
01420     assert(VT.isFloatingPoint());
01421     return false;
01422   }
01423 
01424   /// Return true if an fabs operation is free to the point where it is never
01425   /// worthwhile to replace it with a bitwise operation.
01426   virtual bool isFAbsFree(EVT VT) const {
01427     assert(VT.isFloatingPoint());
01428     return false;
01429   }
01430 
  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions).
  /// Defaults to false.
  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
    return false;
  }
01442 
  /// Return true if it's profitable to narrow operations of type VT1 to
  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.  Defaults to false.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }
01449 
  /// \brief Return true if it is beneficial to convert a load of a constant to
  /// just the constant itself.
  /// On some targets it might be more efficient to use a combination of
  /// arithmetic instructions to materialize the constant instead of loading it
  /// from a constant pool.
  /// \param Imm the constant value that would be materialized.
  /// \param Ty the type of the constant.
  /// Defaults to false (keep loading from the constant pool).
  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                 Type *Ty) const {
    return false;
  }
01459   //===--------------------------------------------------------------------===//
01460   // Runtime Library hooks
01461   //
01462 
  /// Rename the default libcall routine name for the specified libcall.
  /// \p Name must outlive this object (it is stored, not copied).
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }
01467 
  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }
01472 
  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }
01478 
  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }
01484 
  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }
01489 
  /// Get the CallingConv that should be used for the specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }
01494 
01495 private:
01496   const TargetMachine &TM;
01497   const DataLayout *DL;
01498   const TargetLoweringObjectFile &TLOF;
01499 
01500   /// True if this is a little endian target.
01501   bool IsLittleEndian;
01502 
01503   /// Tells the code generator not to expand operations into sequences that use
01504   /// the select operations if possible.
01505   bool SelectIsExpensive;
01506 
01507   /// Tells the code generator that the target has multiple (allocatable)
01508   /// condition registers that can be used to store the results of comparisons
01509   /// for use by selects and conditional branches. With multiple condition
01510   /// registers, the code generator will not aggressively sink comparisons into
01511   /// the blocks of their users.
01512   bool HasMultipleConditionRegisters;
01513 
01514   /// Tells the code generator that the target has BitExtract instructions.
01515   /// The code generator will aggressively sink "shift"s into the blocks of
01516   /// their users if the users will generate "and" instructions which can be
01517   /// combined with "shift" to BitExtract instructions.
01518   bool HasExtractBitsInsn;
01519 
01520   /// Tells the code generator not to expand integer divides by constants into a
01521   /// sequence of muls, adds, and shifts.  This is a hack until a real cost
01522   /// model is in place.  If we ever optimize for size, this will be set to true
01523   /// unconditionally.
01524   bool IntDivIsCheap;
01525 
01526   /// Tells the code generator to bypass slow divide or remainder
01527   /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
01528   /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
01529   /// div/rem when the operands are positive and less than 256.
01530   DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
01531 
01532   /// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
01533   /// signed divide by power of two; let the target handle it.
01534   bool Pow2SDivIsCheap;
01535 
01536   /// Tells the code generator that it shouldn't generate extra flow control
01537   /// instructions and should attempt to combine flow control instructions via
01538   /// predication.
01539   bool JumpIsExpensive;
01540 
01541   /// Whether the target supports or cares about preserving floating point
01542   /// exception behavior.
01543   bool HasFloatingPointExceptions;
01544 
01545   /// This target prefers to use _setjmp to implement llvm.setjmp.
01546   ///
01547   /// Defaults to false.
01548   bool UseUnderscoreSetJmp;
01549 
01550   /// This target prefers to use _longjmp to implement llvm.longjmp.
01551   ///
01552   /// Defaults to false.
01553   bool UseUnderscoreLongJmp;
01554 
01555   /// Number of blocks threshold to use jump tables.
01556   int MinimumJumpTableEntries;
01557 
01558   /// Information about the contents of the high-bits in boolean values held in
01559   /// a type wider than i1. See getBooleanContents.
01560   BooleanContent BooleanContents;
01561 
01562   /// Information about the contents of the high-bits in boolean values held in
01563   /// a type wider than i1. See getBooleanContents.
01564   BooleanContent BooleanFloatContents;
01565 
01566   /// Information about the contents of the high-bits in boolean vector values
01567   /// when the element type is wider than i1. See getBooleanContents.
01568   BooleanContent BooleanVectorContents;
01569 
01570   /// The target scheduling preference: shortest possible total cycles or lowest
01571   /// register usage.
01572   Sched::Preference SchedPreferenceInfo;
01573 
01574   /// The size, in bytes, of the target's jmp_buf buffers
01575   unsigned JumpBufSize;
01576 
01577   /// The alignment, in bytes, of the target's jmp_buf buffers
01578   unsigned JumpBufAlignment;
01579 
01580   /// The minimum alignment that any argument on the stack needs to have.
01581   unsigned MinStackArgumentAlignment;
01582 
01583   /// The minimum function alignment (used when optimizing for size, and to
01584   /// prevent explicitly provided alignment from leading to incorrect code).
01585   unsigned MinFunctionAlignment;
01586 
01587   /// The preferred function alignment (used when alignment unspecified and
01588   /// optimizing for speed).
01589   unsigned PrefFunctionAlignment;
01590 
01591   /// The preferred loop alignment.
01592   unsigned PrefLoopAlignment;
01593 
  /// Whether the DAG builder should automatically insert fences and reduce
  /// ordering for atomics.  (This will be set for most architectures with
  /// weak memory ordering.)
01597   bool InsertFencesForAtomic;
01598 
01599   /// If set to a physical register, this specifies the register that
01600   /// llvm.savestack/llvm.restorestack should save and restore.
01601   unsigned StackPointerRegisterToSaveRestore;
01602 
01603   /// If set to a physical register, this specifies the register that receives
01604   /// the exception address on entry to a landing pad.
01605   unsigned ExceptionPointerRegister;
01606 
01607   /// If set to a physical register, this specifies the register that receives
01608   /// the exception typeid on entry to a landing pad.
01609   unsigned ExceptionSelectorRegister;
01610 
01611   /// This indicates the default register class to use for each ValueType the
01612   /// target supports natively.
01613   const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
01614   unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
01615   MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
01616 
01617   /// This indicates the "representative" register class to use for each
01618   /// ValueType the target supports natively. This information is used by the
01619   /// scheduler to track register pressure. By default, the representative
01620   /// register class is the largest legal super-reg register class of the
01621   /// register class of the specified type. e.g. On x86, i8, i16, and i32's
01622   /// representative class would be GR32.
01623   const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
01624 
01625   /// This indicates the "cost" of the "representative" register class for each
01626   /// ValueType. The cost is used by the scheduler to approximate register
01627   /// pressure.
01628   uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
01629 
01630   /// For any value types we are promoting or expanding, this contains the value
01631   /// type that we are changing to.  For Expanded types, this contains one step
01632   /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
01633   /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
01634   /// the same type (e.g. i32 -> i32).
01635   MVT TransformToType[MVT::LAST_VALUETYPE];
01636 
01637   /// For each operation and each value type, keep a LegalizeAction that
01638   /// indicates how instruction selection should deal with the operation.  Most
01639   /// operations are Legal (aka, supported natively by the target), but
01640   /// operations that are not should be described.  Note that operations on
01641   /// non-legal value types are not described here.
01642   uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
01643 
01644   /// For each load extension type and each value type, keep a LegalizeAction
01645   /// that indicates how instruction selection should deal with a load of a
01646   /// specific value type and extension type.
01647   uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
01648 
01649   /// For each value type pair keep a LegalizeAction that indicates whether a
01650   /// truncating store of a specific value type and truncating type is legal.
01651   uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
01652 
01653   /// For each indexed mode and each value type, keep a pair of LegalizeAction
01654   /// that indicates how instruction selection should deal with the load /
01655   /// store.
01656   ///
01657   /// The first dimension is the value_type for the reference. The second
01658   /// dimension represents the various modes for load store.
01659   uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
01660 
01661   /// For each condition code (ISD::CondCode) keep a LegalizeAction that
01662   /// indicates how instruction selection should deal with the condition code.
01663   ///
01664   /// Because each CC action takes up 2 bits, we need to have the array size be
01665   /// large enough to fit all of the value types. This can be done by rounding
01666   /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
01667   uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
01668 
01669   ValueTypeActionImpl ValueTypeActions;
01670 
01671 public:
  /// Compute how to legalize the type \p VT in \p Context: returns the
  /// LegalizeTypeAction to apply together with the type produced by one step
  /// of that action.
  LegalizeKind
  getTypeConversion(LLVMContext &Context, EVT VT) const {
    // If this is a simple type, use the ComputeRegisterProp mechanism.
    if (VT.isSimple()) {
      MVT SVT = VT.getSimpleVT();
      assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
      MVT NVT = TransformToType[SVT.SimpleTy];
      LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

      assert(
        (LA == TypeLegal || LA == TypeSoftenFloat ||
         ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
         && "Promote may not follow Expand or Promote");

      // Splitting halves the element count; the tables only record one step.
      if (LA == TypeSplitVector)
        return LegalizeKind(LA, EVT::getVectorVT(Context,
                                                 SVT.getVectorElementType(),
                                                 SVT.getVectorNumElements()/2));
      if (LA == TypeScalarizeVector)
        return LegalizeKind(LA, SVT.getVectorElementType());
      return LegalizeKind(LA, NVT);
    }

    // Handle Extended Scalar Types.
    if (!VT.isVector()) {
      assert(VT.isInteger() && "Float types must be simple");
      unsigned BitSize = VT.getSizeInBits();
      // First promote to a power-of-two size, then expand if necessary.
      if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
        EVT NVT = VT.getRoundIntegerType(Context);
        assert(NVT != VT && "Unable to round integer VT");
        LegalizeKind NextStep = getTypeConversion(Context, NVT);
        // Avoid multi-step promotion.
        if (NextStep.first == TypePromoteInteger) return NextStep;
        // Return rounded integer type.
        return LegalizeKind(TypePromoteInteger, NVT);
      }

      // Power-of-two-sized but still illegal: expand by halving the width.
      return LegalizeKind(TypeExpandInteger,
                          EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
    }

    // Handle vector types.
    unsigned NumElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();

    // Vectors with only one element are always scalarized.
    if (NumElts == 1)
      return LegalizeKind(TypeScalarizeVector, EltVT);

    // Try to widen vector elements until the element type is a power of two and
    // promote it to a legal type later on, for example:
    // <3 x i8> -> <4 x i8> -> <4 x i32>
    if (EltVT.isInteger()) {
      // Vectors with a number of elements that is not a power of two are always
      // widened, for example <3 x i8> -> <4 x i8>.
      if (!VT.isPow2VectorType()) {
        NumElts = (unsigned)NextPowerOf2(NumElts);
        EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
        return LegalizeKind(TypeWidenVector, NVT);
      }

      // Examine the element type.
      LegalizeKind LK = getTypeConversion(Context, EltVT);

      // If type is to be expanded, split the vector.
      //  <4 x i140> -> <2 x i140>
      if (LK.first == TypeExpandInteger)
        return LegalizeKind(TypeSplitVector,
                            EVT::getVectorVT(Context, EltVT, NumElts / 2));

      // Promote the integer element types until a legal vector type is found
      // or until the element integer type is too big. If a legal type was not
      // found, fallback to the usual mechanism of widening/splitting the
      // vector.
      EVT OldEltVT = EltVT;
      while (1) {
        // Increase the bitwidth of the element to the next pow-of-two
        // (which is greater than 8 bits).
        EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
                                 ).getRoundIntegerType(Context);

        // Stop trying when getting a non-simple element type.
        // Note that vector elements may be greater than legal vector element
        // types. Example: X86 XMM registers hold 64bit element on 32bit
        // systems.
        if (!EltVT.isSimple()) break;

        // Build a new vector type and check if it is legal.
        MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
        // Found a legal promoted vector type.
        if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
          return LegalizeKind(TypePromoteInteger,
                              EVT::getVectorVT(Context, EltVT, NumElts));
      }

      // Reset the type to the unexpanded type if we did not find a legal vector
      // type with a promoted vector element type.
      EltVT = OldEltVT;
    }

    // Try to widen the vector until a legal type is found.
    // If there is no wider legal type, split the vector.
    while (1) {
      // Round up to the next power of 2.
      NumElts = (unsigned)NextPowerOf2(NumElts);

      // If there is no simple vector type with this many elements then there
      // cannot be a larger legal vector type.  Note that this assumes that
      // there are no skipped intermediate vector types in the simple types.
      if (!EltVT.isSimple()) break;
      MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      if (LargerVector == MVT()) break;

      // If this type is legal then widen the vector.
      if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
        return LegalizeKind(TypeWidenVector, LargerVector);
    }

    // Widen odd vectors to next power of two.
    if (!VT.isPow2VectorType()) {
      EVT NVT = VT.getPow2VectorType(Context);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Vectors with illegal element types are expanded.
    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
    return LegalizeKind(TypeSplitVector, NVT);
  }
01801 
01802 private:
01803   std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
01804 
01805   /// Targets can specify ISD nodes that they would like PerformDAGCombine
01806   /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
01807   /// array.
01808   unsigned char
01809   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
01810 
01811   /// For operations that must be promoted to a specific type, this holds the
01812   /// destination type.  This map should be sparse, so don't hold it as an
01813   /// array.
01814   ///
01815   /// Targets add entries to this map with AddPromotedToType(..), clients access
01816   /// this with getTypeToPromoteTo(..).
01817   std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
01818     PromoteToType;
01819 
  /// Stores the name of each libcall.
01821   const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
01822 
01823   /// The ISD::CondCode that should be used to test the result of each of the
01824   /// comparison libcall against zero.
01825   ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
01826 
01827   /// Stores the CallingConv that should be used for each libcall.
01828   CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
01829 
01830 protected:
01831   /// \brief Specify maximum number of store instructions per memset call.
01832   ///
01833   /// When lowering \@llvm.memset this field specifies the maximum number of
01834   /// store operations that may be substituted for the call to memset. Targets
01835   /// must set this value based on the cost threshold for that target. Targets
01836   /// should assume that the memset will be done using as many of the largest
01837   /// store operations first, followed by smaller ones, if necessary, per
01838   /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
01839   /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
01840   /// store.  This only applies to setting a constant array of a constant size.
01841   unsigned MaxStoresPerMemset;
01842 
01843   /// Maximum number of stores operations that may be substituted for the call
01844   /// to memset, used for functions with OptSize attribute.
01845   unsigned MaxStoresPerMemsetOptSize;
01846 
  /// \brief Specify maximum number of store instructions per memcpy call.
01848   ///
01849   /// When lowering \@llvm.memcpy this field specifies the maximum number of
01850   /// store operations that may be substituted for a call to memcpy. Targets
01851   /// must set this value based on the cost threshold for that target. Targets
01852   /// should assume that the memcpy will be done using as many of the largest
01853   /// store operations first, followed by smaller ones, if necessary, per
01854   /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
01856   /// and one 1-byte store. This only applies to copying a constant array of
01857   /// constant size.
01858   unsigned MaxStoresPerMemcpy;
01859 
01860   /// Maximum number of store operations that may be substituted for a call to
01861   /// memcpy, used for functions with OptSize attribute.
01862   unsigned MaxStoresPerMemcpyOptSize;
01863 
  /// \brief Specify maximum number of store instructions per memmove call.
01865   ///
01866   /// When lowering \@llvm.memmove this field specifies the maximum number of
01867   /// store instructions that may be substituted for a call to memmove. Targets
01868   /// must set this value based on the cost threshold for that target. Targets
01869   /// should assume that the memmove will be done using as many of the largest
01870   /// store operations first, followed by smaller ones, if necessary, per
01871   /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
01872   /// with 8-bit alignment would result in nine 1-byte stores.  This only
01873   /// applies to copying a constant array of constant size.
01874   unsigned MaxStoresPerMemmove;
01875 
  /// Maximum number of store instructions that may be substituted for a call to
  /// memmove, used for functions with OptSize attribute.
01878   unsigned MaxStoresPerMemmoveOptSize;
01879 
01880   /// Tells the code generator that select is more expensive than a branch if
01881   /// the branch is usually predicted right.
01882   bool PredictableSelectIsExpensive;
01883 
01884   /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
01885   /// a mask of a single bit, a compare, and a branch into a single instruction.
01886   bool MaskAndBranchFoldingIsLegal;
01887 
01888 protected:
01889   /// Return true if the value types that can be represented by the specified
01890   /// register class are all legal.
01891   bool isLegalRC(const TargetRegisterClass *RC) const;
01892 
  /// Replace/modify any TargetFrameIndex operands with a target-dependent
  /// sequence of memory operands that is recognized by PrologEpilogInserter.
01895   MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
01896 };
01897 
01898 /// This class defines information used to lower LLVM code to legal SelectionDAG
01899 /// operators that the target instruction selector can accept natively.
01900 ///
01901 /// This class also defines callbacks that targets must implement to lower
01902 /// target-specific constructs to SelectionDAG operators.
01903 class TargetLowering : public TargetLoweringBase {
01904   TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
01905   void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
01906 
01907 public:
01908   /// NOTE: The constructor takes ownership of TLOF.
01909   explicit TargetLowering(const TargetMachine &TM,
01910                           const TargetLoweringObjectFile *TLOF);
01911 
  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if the node's address can be legally represented as
  /// pre-indexed load / store address.
  /// The default returns false, i.e. pre-indexed addressing is not supported.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }
01921 
  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if this node can be combined with a load / store to form a
  /// post-indexed load / store.
  /// The default returns false, i.e. post-indexed addressing is not supported.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }
01932 
01933   /// Return the entry encoding for a jump table in the current function.  The
01934   /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
01935   virtual unsigned getJumpTableEncoding() const;
01936 
  /// Lower a custom jump table entry for the given jump table / basic block to
  /// an MCExpr.  Targets that use a custom jump table encoding must override
  /// this hook; the default implementation aborts.
  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }
01943 
01944   /// Returns relocation base for the given PIC jumptable.
01945   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
01946                                            SelectionDAG &DAG) const;
01947 
01948   /// This returns the relocation base for the given PIC jumptable, the same as
01949   /// getPICJumpTableRelocBase, but as an MCExpr.
01950   virtual const MCExpr *
01951   getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
01952                                unsigned JTI, MCContext &Ctx) const;
01953 
01954   /// Return true if folding a constant offset with the given GlobalAddress is
01955   /// legal.  It is frequently not legal in PIC relocation models.
01956   virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
01957 
01958   bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
01959                             SDValue &Chain) const;
01960 
01961   void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
01962                            SDValue &NewLHS, SDValue &NewRHS,
01963                            ISD::CondCode &CCCode, SDLoc DL) const;
01964 
01965   /// Returns a pair of (return value, chain).
01966   std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
01967                                           EVT RetVT, const SDValue *Ops,
01968                                           unsigned NumOps, bool isSigned,
01969                                           SDLoc dl, bool doesNotReturn = false,
01970                                           bool isReturnValueUsed = true) const;
01971 
01972   //===--------------------------------------------------------------------===//
01973   // TargetLowering Optimization Methods
01974   //
01975 
  /// A convenience struct that encapsulates a DAG, and two SDValues for
  /// returning information from TargetLowering to its clients that want to
  /// combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;   // The DAG being combined over.
    bool LegalTys;       // True if only legal types may be produced.
    bool LegalOps;       // True if only legal operations may be produced.
    SDValue Old;         // Node to be replaced (set by CombineTo).
    SDValue New;         // Replacement node (set by CombineTo).

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    /// Record that O should be replaced by N; always returns true so callers
    /// can write "return TLO.CombineTo(O, N);".
    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// Check to see if the specified operand of the specified instruction is a
    /// constant integer.  If so, check to see if there are any bits set in the
    /// constant that are not demanded.  If so, shrink the constant and return
    /// true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);

    /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
    /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
    /// generalized for targets with other types of implicit widening casts.
    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                          SDLoc dl);
  };
02011 
  /// Look at Op.  At this point, we know that only the DemandedMask bits of the
  /// result of Op are ever used downstream.  If we can use this information to
  /// simplify Op, create a new simplified DAG node and return true, returning
  /// the original and new nodes in Old and New.  Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller).  The KnownZero/One bits may only
  /// be accurate for those bits in the DemandedMask.
  /// \p Depth is the current recursion depth (presumably bounded by the
  /// implementation — confirm in TargetLowering.cpp).
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                             APInt &KnownZero,
                                             APInt &KnownOne,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth = 0) const;
02036 
02037   struct DAGCombinerInfo {
02038     void *DC;  // The DAG Combiner object.
02039     CombineLevel Level;
02040     bool CalledByLegalizer;
02041   public:
02042     SelectionDAG &DAG;
02043 
02044     DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void *dc)
02045       : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
02046 
02047     bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
02048     bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
02049     bool isAfterLegalizeVectorOps() const {
02050       return Level == AfterLegalizeDAG;
02051     }
02052     CombineLevel getDAGCombineLevel() { return Level; }
02053     bool isCalledByLegalizer() const { return CalledByLegalizer; }
02054 
02055     void AddToWorklist(SDNode *N);
02056     void RemoveFromWorklist(SDNode *N);
02057     SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
02058                       bool AddTo = true);
02059     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
02060     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
02061 
02062     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
02063   };
02064 
  /// Return true if N is a constant or constant vector equal to the true value
  /// from getBooleanContents().
  bool isConstTrueVal(const SDNode *N) const;

  /// Return true if N is a constant or constant vector equal to the false
  /// value from getBooleanContents().
  bool isConstFalseVal(const SDNode *N) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                          ISD::CondCode Cond, bool foldBooleans,
                          DAGCombinerInfo &DCI, SDLoc dl) const;

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered with invoke it
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
02098 
  /// Return true if it is profitable to move a following shift through this
  /// node, adjusting any immediate operands as necessary to preserve semantics.
  /// This transformation may not be desirable if it disrupts a particularly
  /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
  /// By default, it returns true.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }
02131 
  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  ///
  /// The default implementation is unreachable; every target that lowers
  /// functions must override it.
  virtual SDValue
    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                         bool /*isVarArg*/,
                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
                         SDLoc /*dl*/, SelectionDAG &/*DAG*/,
                         SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }
02150 
  /// One entry in the argument list handed to call lowering; mirrors the
  /// IR-level parameter attributes of a call argument.
  struct ArgListEntry {
    SDValue Node;          // Lowered value of the argument.
    Type* Ty;              // IR type of the argument.
    bool isSExt     : 1;   // signext attribute.
    bool isZExt     : 1;   // zeroext attribute.
    bool isInReg    : 1;   // inreg attribute.
    bool isSRet     : 1;   // sret attribute.
    bool isNest     : 1;   // nest attribute.
    bool isByVal    : 1;   // byval attribute.
    bool isInAlloca : 1;   // inalloca attribute.
    bool isReturned : 1;   // returned attribute.
    uint16_t Alignment;    // Alignment; 0 when unspecified.

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
      isReturned(false), Alignment(0) { }

    /// Initialize the flags above from the attributes of call site \p CS at
    /// attribute index \p AttrIdx.
    void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
  };
  typedef std::vector<ArgListEntry> ArgListTy;
02171 
02172   /// This structure contains all information that is necessary for lowering
02173   /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
02174   /// needs to lower a call, and targets will see this struct in their LowerCall
02175   /// implementation.
02176   struct CallLoweringInfo {
02177     SDValue Chain;
02178     Type *RetTy;
02179     bool RetSExt           : 1;
02180     bool RetZExt           : 1;
02181     bool IsVarArg          : 1;
02182     bool IsInReg           : 1;
02183     bool DoesNotReturn     : 1;
02184     bool IsReturnValueUsed : 1;
02185 
02186     // IsTailCall should be modified by implementations of
02187     // TargetLowering::LowerCall that perform tail call conversions.
02188     bool IsTailCall;
02189 
02190     unsigned NumFixedArgs;
02191     CallingConv::ID CallConv;
02192     SDValue Callee;
02193     ArgListTy Args;
02194     SelectionDAG &DAG;
02195     SDLoc DL;
02196     ImmutableCallSite *CS;
02197     SmallVector<ISD::OutputArg, 32> Outs;
02198     SmallVector<SDValue, 32> OutVals;
02199     SmallVector<ISD::InputArg, 32> Ins;
02200 
02201     CallLoweringInfo(SelectionDAG &DAG)
02202       : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
02203         IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
02204         IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
02205         DAG(DAG), CS(nullptr) {}
02206 
02207     CallLoweringInfo &setDebugLoc(SDLoc dl) {
02208       DL = dl;
02209       return *this;
02210     }
02211 
02212     CallLoweringInfo &setChain(SDValue InChain) {
02213       Chain = InChain;
02214       return *this;
02215     }
02216 
02217     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
02218                                 SDValue Target, ArgListTy &&ArgsList,
02219                                 unsigned FixedArgs = -1) {
02220       RetTy = ResultType;
02221       Callee = Target;
02222       CallConv = CC;
02223       NumFixedArgs =
02224         (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
02225       Args = std::move(ArgsList);
02226       return *this;
02227     }
02228 
02229     CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
02230                                 SDValue Target, ArgListTy &&ArgsList,
02231                                 ImmutableCallSite &Call) {
02232       RetTy = ResultType;
02233 
02234       IsInReg = Call.paramHasAttr(0, Attribute::InReg);
02235       DoesNotReturn = Call.doesNotReturn();
02236       IsVarArg = FTy->isVarArg();
02237       IsReturnValueUsed = !Call.getInstruction()->use_empty();
02238       RetSExt = Call.paramHasAttr(0, Attribute::SExt);
02239       RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
02240 
02241       Callee = Target;
02242 
02243       CallConv = Call.getCallingConv();
02244       NumFixedArgs = FTy->getNumParams();
02245       Args = std::move(ArgsList);
02246 
02247       CS = &Call;
02248 
02249       return *this;
02250     }
02251 
02252     CallLoweringInfo &setInRegister(bool Value = true) {
02253       IsInReg = Value;
02254       return *this;
02255     }
02256 
02257     CallLoweringInfo &setNoReturn(bool Value = true) {
02258       DoesNotReturn = Value;
02259       return *this;
02260     }
02261 
02262     CallLoweringInfo &setVarArg(bool Value = true) {
02263       IsVarArg = Value;
02264       return *this;
02265     }
02266 
02267     CallLoweringInfo &setTailCall(bool Value = true) {
02268       IsTailCall = Value;
02269       return *this;
02270     }
02271 
02272     CallLoweringInfo &setDiscardResult(bool Value = true) {
02273       IsReturnValueUsed = !Value;
02274       return *this;
02275     }
02276 
02277     CallLoweringInfo &setSExtResult(bool Value = true) {
02278       RetSExt = Value;
02279       return *this;
02280     }
02281 
02282     CallLoweringInfo &setZExtResult(bool Value = true) {
02283       RetZExt = Value;
02284       return *this;
02285     }
02286 
02287     ArgListTy &getArgs() {
02288       return Args;
02289     }
02290   };
02291 
  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands.  The first element is the return value
  /// for the function (if RetTy is not VoidTy).  The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
    LowerCall(CallLoweringInfo &/*CLI*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers.  If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
               LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }
02323 
  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue
    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                bool /*isVarArg*/,
                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                const SmallVectorImpl<SDValue> &/*OutVals*/,
                SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
    return false;
  }

  /// Return the builtin name for the __builtin___clear_cache intrinsic
  /// Default is to invoke the clear cache library call
  virtual const char * getClearCacheBuiltinName() const {
    return "__clear_cache";
  }
02357 
  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail with a fatal error.
  virtual unsigned getRegisterByName(const char* RegName, EVT VT) const {
    report_fatal_error("Named registers not implemented for this target");
  }
02364 
02365   /// Return the type that should be used to zero or sign extend a
02366   /// zeroext/signext integer argument or return value.  FIXME: Most C calling
02367   /// convention requires the return type to be promoted, but this is not true
02368   /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
02369   /// calling conventions. The frontend should handle this and include all of
02370   /// the necessary information.
02371   virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
02372                                        ISD::NodeType /*ExtendKind*/) const {
02373     EVT MinVT = getRegisterType(Context, MVT::i32);
02374     return VT.bitsLT(MinVT) ? MinVT : VT;
02375   }
02376 
  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg) const {
    // Default: no target-specific consecutive-register requirement.
    return false;
  }

  /// Returns a 0 terminated array of registers that can be safely used as
  /// scratch registers.  Returns null by default (no scratch registers).
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }

  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU.  The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }
02403 
  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types.  It replaces the
  /// LowerOperation callback in the type Legalizer.  The reason we can not do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal.  If the target has no operations that require custom
  /// lowering, it need not implement this.  The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type.  The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.  The default returns null.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }
02452 
02453 
  /// Verify that the argument of a return-address-style node \p Op is a
  /// constant.  NOTE(review): semantics inferred from the name — confirm
  /// against the definition before relying on this.
  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to.  This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.  Returns false (no expansion) by default.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }
02468 
  /// Classification of an inline-asm constraint, as produced by
  /// getConstraintType().
  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  /// Weights used to rank how well an operand matches a constraint; returned
  /// by the getSingle/getMultipleConstraintMatchWeight hooks below.
  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };
02492 
  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m".  TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst.  This gets
    /// modified as the asm is processed.
    Value *CallOperandVal;

    /// The ValueType for the operand value.
    MVT ConstraintVT;

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;

    /// Converting constructor: initialize from an InlineAsm::ConstraintInfo,
    /// leaving the TargetLowering-specific fields in their default state.
    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(nullptr), ConstraintVT(MVT::Other) {
    }
  };

  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
02529 
  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values.  If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;

  /// Given a physical register constraint (e.g.  {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints.  On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand.  This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;
02580 
  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  /// Build a signed division of \p Op1 by the constant \p Op2, assuming the
  /// division is exact.  NOTE(review): semantics inferred from the name —
  /// confirm against the definition in TargetLowering.cpp.
  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
                         SelectionDAG &DAG) const;
  /// Build an SDIV of node \p N by the constant \p Divisor; newly created
  /// nodes are collected in \p Created (presumably for the combiner's
  /// worklist — verify at call sites).
  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    std::vector<SDNode *> *Created) const;
  /// Build a UDIV of node \p N by the constant \p Divisor; see BuildSDIV for
  /// the meaning of the other parameters.
  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    std::vector<SDNode *> *Created) const;
  /// Targets may override to provide a custom expansion of SDIV by a
  /// power-of-two \p Divisor.  The default returns an empty SDValue.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                std::vector<SDNode *> *Created) const {
    return SDValue();
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL into two nodes.  One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded. false if it has not
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, SDValue LL = SDValue(),
                 SDValue LH = SDValue(), SDValue RL = SDValue(),
                 SDValue RH = SDValue()) const;

  /// Expand float(f32) to SINT(i64) conversion
  /// NOTE(review): despite the f32/i64 wording, the name suggests a generic
  /// FP_TO_SINT expansion — confirm against the definition.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
02621 
  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag.  These instructions are special in various
  /// ways, which require special support to insert.  The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// \returns the block where control continues after the expansion.
  virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks.  e.g. To fill in optional defs for
  /// ARM 's' setting instructions.
  virtual void
  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  /// The default returns false.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }
02647 };
02648 
/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.  Results are appended to \p Outs.
void GetReturnInfo(Type* ReturnType, AttributeSet attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI);
02655 
02656 } // end llvm namespace
02657 
02658 #endif