AArch64ISelLowering.h
//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Almost the same as a normal call node, except that a TLSDesc relocation is
  // needed so the linker can relax it correctly if possible.
  TLSDESC_CALL,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
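  // Conditional select instruction ("csel").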
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Floating point comparison
  FCMP,

  // Floating point max and min instructions.
  FMAX,
  FMIN,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar: saturating and rounding variants.
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
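  /// When true, unaligned memory accesses are not allowed; see
  /// allowsMisalignedMemoryAccesses below.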
  bool RequireStrictAlign;

public:
  explicit AArch64TargetLowering(TargetMachine &TM);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// computeKnownBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(EVT LHSTy) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override {
    if (RequireStrictAlign)
      return false;
    // FIXME: True for Cyclone, but not necessarily others.
    if (Fast)
      *Fast = true;
    return true;
  }

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// getFunctionAlignment - Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// getMaximalGlobalOffset - Returns the maximal possible offset which can
  /// be used for loads / stores from the global.
  unsigned getMaximalGlobalOffset() const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isShuffleMaskLegal - Return true if the given shuffle mask can be
  /// codegen'd directly, or if it should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType
  EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;

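  /// Attempt to rebuild a vector shuffle from a BUILD_VECTOR whose operands
  /// are extracted from other vectors.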
  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

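  /// Expand the pseudo used for f128 conditional selects (there is no native
  /// f128 csel); invoked from EmitInstrWithCustomInserter.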
  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool hasLoadLinkedStoreConditional() const override;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  bool shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

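  /// Register a NEON vector type with the backend and mark the operations
  /// that are legal for it; the DR/QR variants handle 64-bit (D register)
  /// and 128-bit (Q register) vector types respectively.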
  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

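  /// For variadic functions, spill the unused argument registers to the
  /// va_list save area so that va_arg can find them.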
  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

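  // Custom lowering routines for individual operations, dispatched from
  // LowerOperation() above.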
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
                              SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;

  ConstraintType
  getConstraintType(const std::string &Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const std::string &Constraint,
                               MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
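  // Shared helper for the pre-/post-indexed addressing hooks below.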
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
};

namespace AArch64 {
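/// Factory function for the target's FastISel implementation; used by
/// AArch64TargetLowering::createFastISel.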
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif