LLVM API Documentation — source listing for AArch64AsmParser.cpp. See the generated documentation page for this file for cross-referenced symbols.
00001 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
00035 using namespace llvm;
00036 
00037 namespace {
00038 
00039 class AArch64Operand;
00040 
/// AArch64AsmParser - Target assembly parser for AArch64. Handles mnemonic
/// and operand parsing, AArch64-specific directives (.req/.unreq/.ltorg,
/// .tlsdesccall, .loh), and the final match-and-emit of MCInsts via the
/// TableGen-generated matcher.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register aliases created via the .req directive. The mapped pair
  // is (IsVector, RegNum) as recorded by parseDirectiveReq.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Location of the token currently under the lexer.
  SMLoc getLoc() const { return Parser.getTok().getLoc(); }

  // Parsing helpers for the various instruction/operand forms.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  // Diagnostic helpers forwarding to the generic MCAsmParser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  // Target-specific directive handlers.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers invoked by the auto-generated matcher.
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                 const MCInstrInfo &MII,
                 const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);
    // Ensure a target streamer exists even when the underlying streamer does
    // not provide one.
    if (Parser.getStreamer().getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(Parser.getStreamer());

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  /// Decompose \p Expr into an ELF and/or Darwin relocation modifier plus a
  /// constant addend. Returns false when the expression is not understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
00141 } // end anonymous namespace
00142 
00143 namespace {
00144 
00145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
00146 /// instruction.
00147 class AArch64Operand : public MCParsedAsmOperand {
00148 private:
  // Discriminator for the union below; exactly one union member is active at
  // any time, selected by Kind.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier
  } Kind;

  // Source range covered by this operand.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  // NOTE(review): ExtendOp is not a member of the union below and is not
  // referenced in the visible portion of this file — presumably dead; confirm
  // against the rest of the file before removing.
  struct ExtendOp {
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
00255 
00256 public:
  /// Construct an empty operand of kind \p K; the union payload is filled in
  /// afterwards by the caller.
  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
00259 
  /// Copy constructor. The payload lives in a union, so only the member
  /// selected by \p o.Kind may legally be read; copy exactly that member.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
00306 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Typed accessors. Each asserts that the operand currently holds the
  // matching union member (via Kind) before reading it.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }
00406 
00407   bool isImm() const override { return Kind == k_Immediate; }
00408   bool isMem() const override { return false; }
00409   bool isSImm9() const {
00410     if (!isImm())
00411       return false;
00412     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00413     if (!MCE)
00414       return false;
00415     int64_t Val = MCE->getValue();
00416     return (Val >= -256 && Val < 256);
00417   }
00418   bool isSImm7s4() const {
00419     if (!isImm())
00420       return false;
00421     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00422     if (!MCE)
00423       return false;
00424     int64_t Val = MCE->getValue();
00425     return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
00426   }
00427   bool isSImm7s8() const {
00428     if (!isImm())
00429       return false;
00430     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00431     if (!MCE)
00432       return false;
00433     int64_t Val = MCE->getValue();
00434     return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
00435   }
00436   bool isSImm7s16() const {
00437     if (!isImm())
00438       return false;
00439     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00440     if (!MCE)
00441       return false;
00442     int64_t Val = MCE->getValue();
00443     return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
00444   }
00445 
  /// Check whether a symbolic expression is acceptable as a uimm12-scaled
  /// load/store offset with element size \p Scale. Unclassifiable
  /// expressions are optimistically accepted and left for the fixup code.
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
00478 
00479   template <int Scale> bool isUImm12Offset() const {
00480     if (!isImm())
00481       return false;
00482 
00483     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00484     if (!MCE)
00485       return isSymbolicUImm12Offset(getImm(), Scale);
00486 
00487     int64_t Val = MCE->getValue();
00488     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
00489   }
00490 
00491   bool isImm0_7() const {
00492     if (!isImm())
00493       return false;
00494     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00495     if (!MCE)
00496       return false;
00497     int64_t Val = MCE->getValue();
00498     return (Val >= 0 && Val < 8);
00499   }
00500   bool isImm1_8() const {
00501     if (!isImm())
00502       return false;
00503     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00504     if (!MCE)
00505       return false;
00506     int64_t Val = MCE->getValue();
00507     return (Val > 0 && Val < 9);
00508   }
00509   bool isImm0_15() const {
00510     if (!isImm())
00511       return false;
00512     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00513     if (!MCE)
00514       return false;
00515     int64_t Val = MCE->getValue();
00516     return (Val >= 0 && Val < 16);
00517   }
00518   bool isImm1_16() const {
00519     if (!isImm())
00520       return false;
00521     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00522     if (!MCE)
00523       return false;
00524     int64_t Val = MCE->getValue();
00525     return (Val > 0 && Val < 17);
00526   }
00527   bool isImm0_31() const {
00528     if (!isImm())
00529       return false;
00530     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00531     if (!MCE)
00532       return false;
00533     int64_t Val = MCE->getValue();
00534     return (Val >= 0 && Val < 32);
00535   }
00536   bool isImm1_31() const {
00537     if (!isImm())
00538       return false;
00539     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00540     if (!MCE)
00541       return false;
00542     int64_t Val = MCE->getValue();
00543     return (Val >= 1 && Val < 32);
00544   }
00545   bool isImm1_32() const {
00546     if (!isImm())
00547       return false;
00548     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00549     if (!MCE)
00550       return false;
00551     int64_t Val = MCE->getValue();
00552     return (Val >= 1 && Val < 33);
00553   }
00554   bool isImm0_63() const {
00555     if (!isImm())
00556       return false;
00557     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00558     if (!MCE)
00559       return false;
00560     int64_t Val = MCE->getValue();
00561     return (Val >= 0 && Val < 64);
00562   }
00563   bool isImm1_63() const {
00564     if (!isImm())
00565       return false;
00566     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00567     if (!MCE)
00568       return false;
00569     int64_t Val = MCE->getValue();
00570     return (Val >= 1 && Val < 64);
00571   }
00572   bool isImm1_64() const {
00573     if (!isImm())
00574       return false;
00575     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00576     if (!MCE)
00577       return false;
00578     int64_t Val = MCE->getValue();
00579     return (Val >= 1 && Val < 65);
00580   }
00581   bool isImm0_127() const {
00582     if (!isImm())
00583       return false;
00584     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00585     if (!MCE)
00586       return false;
00587     int64_t Val = MCE->getValue();
00588     return (Val >= 0 && Val < 128);
00589   }
00590   bool isImm0_255() const {
00591     if (!isImm())
00592       return false;
00593     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00594     if (!MCE)
00595       return false;
00596     int64_t Val = MCE->getValue();
00597     return (Val >= 0 && Val < 256);
00598   }
00599   bool isImm0_65535() const {
00600     if (!isImm())
00601       return false;
00602     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00603     if (!MCE)
00604       return false;
00605     int64_t Val = MCE->getValue();
00606     return (Val >= 0 && Val < 65536);
00607   }
00608   bool isImm32_63() const {
00609     if (!isImm())
00610       return false;
00611     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00612     if (!MCE)
00613       return false;
00614     int64_t Val = MCE->getValue();
00615     return (Val >= 32 && Val < 64);
00616   }
  /// 32-bit logical ("bitmask") immediate for AND/ORR/EOR. The constant may
  /// be written sign-extended to 64 bits; the high half must be all-zero or
  /// all-one before the low 32 bits are validated.
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
      return false;
    Val &= 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  /// 64-bit logical immediate.
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }
  /// Inverted form: true if the bitwise complement of the constant is a valid
  /// 32-bit logical immediate (used for BIC-style aliases).
  bool isLogicalImm32Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  /// Inverted form of isLogicalImm64.
  bool isLogicalImm64Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
  }
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
  /// True if the operand can serve as an ADD/SUB 12-bit immediate: either a
  /// recognized :lo12:-style symbolic reference, or a constant in [0, 0xfff],
  /// optionally with an explicit 'lsl #0' / 'lsl #12' shifter.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    // NOTE(review): cast<> asserts if Expr is a non-constant expression that
    // classifySymbolRef did not recognize — presumably such expressions can't
    // reach here via the matcher; confirm before relying on it.
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
  bool isCondCode() const { return Kind == k_CondCode; }
  /// True if the constant is encodable as an AdvSIMD "type 10" modified
  /// immediate (per-byte 0x00/0xff pattern, as used by FMOV/MOVI.2d).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
00702   bool isBranchTarget26() const {
00703     if (!isImm())
00704       return false;
00705     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00706     if (!MCE)
00707       return true;
00708     int64_t Val = MCE->getValue();
00709     if (Val & 0x3)
00710       return false;
00711     return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
00712   }
00713   bool isPCRelLabel19() const {
00714     if (!isImm())
00715       return false;
00716     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00717     if (!MCE)
00718       return true;
00719     int64_t Val = MCE->getValue();
00720     if (Val & 0x3)
00721       return false;
00722     return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
00723   }
00724   bool isBranchTarget14() const {
00725     if (!isImm())
00726       return false;
00727     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
00728     if (!MCE)
00729       return true;
00730     int64_t Val = MCE->getValue();
00731     if (Val & 0x3)
00732       return false;
00733     return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
00734   }
00735 
00736   bool
00737   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
00738     if (!isImm())
00739       return false;
00740 
00741     AArch64MCExpr::VariantKind ELFRefKind;
00742     MCSymbolRefExpr::VariantKind DarwinRefKind;
00743     int64_t Addend;
00744     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
00745                                              DarwinRefKind, Addend)) {
00746       return false;
00747     }
00748     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
00749       return false;
00750 
00751     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
00752       if (ELFRefKind == AllowedModifiers[i])
00753         return Addend == 0;
00754     }
00755 
00756     return false;
00757   }
00758 
  // The predicates below accept a symbolic MOVZ/MOVK operand carrying the
  // relocation modifier appropriate to each 16-bit chunk (G0..G3); the _NC
  // variants are the no-overflow-check forms used with MOVK.

  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1,      AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
      AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
      AArch64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
      AArch64MCExpr::VK_ABS_G0_NC,   AArch64MCExpr::VK_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }
00813 
  /// True if the constant can be materialized by a single
  /// "MOVZ reg, #imm16, lsl #Shift" for a RegWidth-bit register, i.e. all set
  /// bits fall inside the 16-bit field at \p Shift.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
      return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  /// True if the constant can be materialized by a single
  /// "MOVN reg, #imm16, lsl #Shift" — the complement fits in the 16-bit field
  /// at \p Shift — and no MOVZ encoding exists for the original value.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }
00851 
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
  /// True if the system-register name is readable via MRS on the current
  /// subtarget (feature bits were captured when the operand was created).
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  /// True if the system-register name is writable via MSR on the current
  /// subtarget.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  /// True if the name denotes a PSTATE field accepted by "MSR pstatefield".
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  // Scalar vs. vector register predicates. isReg() deliberately excludes
  // vector registers so the generic matcher only sees scalar GPR/FPRs here.
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  // Vector register restricted to the low half of the FPR128 file
  // (V0-V15), as required by some indexed-element instructions.
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  // 64-bit GPR written where a 32-bit register is expected; the matcher
  // maps it to the corresponding W register (see addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // 64-bit GPR or SP (the GPR64sp register class).
  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }
00897 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  /// Is this a vector list of exactly NumRegs registers whose explicit type
  /// suffix matches ElementKind (e.g. 'b', 'h', 's', 'd') with NumElements
  /// lanes per register?
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }
00915 
  // Lane-index predicates; the bound reflects how many lanes of that
  // element size fit in a 128-bit vector (16 x b, 8 x h, 4 x s, 2 x d).
  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  // Token operand whose text is exactly Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
00938   bool isShifter() const {
00939     if (!isShiftExtend())
00940       return false;
00941 
00942     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
00943     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
00944             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
00945             ST == AArch64_AM::MSL);
00946   }
00947   bool isExtend() const {
00948     if (!isShiftExtend())
00949       return false;
00950 
00951     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
00952     return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
00953             ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
00954             ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
00955             ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
00956             ET == AArch64_AM::LSL) &&
00957            getShiftExtendAmount() <= 4;
00958   }
00959 
  // Extend usable with a 32-bit source register (i.e. anything but the
  // 64-bit UXTX/SXTX forms, which belong to the ExtendLSL64 class below).
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  // Extend usable with a 64-bit source register: UXTX/SXTX, or plain LSL,
  // with an amount of at most 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
00975 
  // Extend valid for a register-offset memory access with a 64-bit offset
  // register: LSL or SXTX, scaled either by the access width or not at all.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Extend valid for a register-offset memory access with a 32-bit offset
  // register: UXTW or SXTW, scaled either by the access width or not at all.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
00993 
  // Shifter valid on arithmetic (add/sub) instructions; the amount must be
  // strictly less than the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shifter valid on logical (and/orr/eor) instructions, which also
  // accept ROR; the amount must be strictly less than the register width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
01016 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16 (the original comment here
    // described the 64-bit variant's 0/16/32/48 set by mistake).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
01028 
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (the original
    // comment here described the 32-bit variant's 0/16 set by mistake).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
01040 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }
01050 
01051   bool isLogicalVecHalfWordShifter() const {
01052     if (!isLogicalVecShifter())
01053       return false;
01054 
01055     // A logical vector shifter is a left shift by 0 or 8.
01056     unsigned Shift = getShiftExtendAmount();
01057     return getShiftExtendType() == AArch64_AM::LSL &&
01058            (Shift == 0 || Shift == 8);
01059   }
01060 
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16 (the original
    // comment incorrectly called this a logical vector shifter).
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
01070 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }
01080 
  // ADRP label: a page-aligned constant within +/-4GB, or any non-constant
  // expression (relocations are resolved later).
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // 21-bit signed page count, scaled by the 4KB page size.
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
01096 
  // ADR label: a constant within the signed 21-bit byte range, or any
  // non-constant expression (relocations are resolved later).
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
01112 
  // Append Expr to Inst, folding constants to plain immediates. A null
  // Expr is treated as the immediate 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
01122 
  // Append this operand's register to Inst unchanged.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
01127 
  // Translate a 64-bit GPR (X register) to the 32-bit W register with the
  // same encoding and append it; pairs with isGPR32as64() above.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }
01139 
  // The parser stores vector registers as Q registers; remap to the
  // overlapping D register for 64-bit vector forms.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  // 128-bit forms use the stored Q register directly.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Low-half (V0-V15) vector register; validity was checked by
  // isVectorRegLo(), so the register is appended as-is.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
01158 
01159   template <unsigned NumRegs>
01160   void addVectorList64Operands(MCInst &Inst, unsigned N) const {
01161     assert(N == 1 && "Invalid number of operands!");
01162     static unsigned FirstRegs[] = { AArch64::D0,       AArch64::D0_D1,
01163                                     AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
01164     unsigned FirstReg = FirstRegs[NumRegs - 1];
01165 
01166     Inst.addOperand(
01167         MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
01168   }
01169 
01170   template <unsigned NumRegs>
01171   void addVectorList128Operands(MCInst &Inst, unsigned N) const {
01172     assert(N == 1 && "Invalid number of operands!");
01173     static unsigned FirstRegs[] = { AArch64::Q0,       AArch64::Q0_Q1,
01174                                     AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
01175     unsigned FirstReg = FirstRegs[NumRegs - 1];
01176 
01177     Inst.addOperand(
01178         MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
01179   }
01180 
  // Lane indices are encoded as plain immediates; range validation was
  // done by the matching isVectorIndex* predicate.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
01205 
  // Append the immediate expression, folding constants via addExpr().
  // NOTE(review): the original comment mentioned adjusting pageoff addends,
  // but this code just forwards the expression as-is — presumably that
  // adjustment happens elsewhere; confirm before relying on it.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
01213 
  // Add/sub immediates occupy two MCInst operands: the value and the LSL
  // shift amount (0 when the immediate was written unshifted).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }
01224 
  // Condition codes are encoded directly as their AArch64CC value.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  // ADRP encodes a page count, so constant targets are scaled down by the
  // 4KB page size; symbolic targets are left for relocation.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }

  // ADR uses a byte offset, so no scaling is needed.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
01242 
  // Scaled unsigned 12-bit offset: constants are divided by the access
  // width Scale; non-constant expressions are deferred to fixup time.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }
01254 
  // Signed 9-bit offset, encoded unscaled.
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  // Signed 7-bit offsets for pair instructions, pre-scaled by the access
  // size (4, 8 or 16 bytes) before encoding.
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }
01278 
  // The addImmN_MOperands family appends a constant immediate verbatim;
  // range checking happened in the corresponding isImmN_M predicate.
  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
01296 
01297   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
01298     assert(N == 1 && "Invalid number of operands!");
01299     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
01300     assert(MCE && "Invalid constant immediate operand!");
01301     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
01302   }
01303 
  // Remaining members of the addImmN_MOperands family; all append the
  // constant verbatim, relying on the predicate for range validation.
  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
01363 
  // Logical immediates are stored as plain values and converted to the
  // N:immr:imms bitmask encoding here; validity was established by the
  // matching predicate.
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Truncate to 32 bits before encoding so sign-extended values work.
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  // "Not" variants encode the bitwise complement, used by aliases such as
  // BIC written with the uninverted immediate.
  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  // AdvSIMD modified-immediate "type 10" (64-bit per-byte mask) encoding.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
01401 
01402   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
01403     // Branch operands don't encode the low bits, so shift them off
01404     // here. If it's a label, however, just put it on directly as there's
01405     // not enough information now to do anything.
01406     assert(N == 1 && "Invalid number of operands!");
01407     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
01408     if (!MCE) {
01409       addExpr(Inst, getImm());
01410       return;
01411     }
01412     assert(MCE && "Invalid constant immediate operand!");
01413     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
01414   }
01415 
01416   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
01417     // Branch operands don't encode the low bits, so shift them off
01418     // here. If it's a label, however, just put it on directly as there's
01419     // not enough information now to do anything.
01420     assert(N == 1 && "Invalid number of operands!");
01421     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
01422     if (!MCE) {
01423       addExpr(Inst, getImm());
01424       return;
01425     }
01426     assert(MCE && "Invalid constant immediate operand!");
01427     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
01428   }
01429 
01430   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
01431     // Branch operands don't encode the low bits, so shift them off
01432     // here. If it's a label, however, just put it on directly as there's
01433     // not enough information now to do anything.
01434     assert(N == 1 && "Invalid number of operands!");
01435     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
01436     if (!MCE) {
01437       addExpr(Inst, getImm());
01438       return;
01439     }
01440     assert(MCE && "Invalid constant immediate operand!");
01441     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
01442   }
01443 
  // FP immediates are stored pre-encoded as their 8-bit FMOV encoding.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  // Barrier options (DSB/DMB/ISB) are stored as their 4-bit CRm value.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }
01453 
  // Encode the MRS system-register name to its op0..op2/CRn/CRm bits. The
  // Valid flag is ignored here because the matching isMRSSystemRegister()
  // predicate already established the name is known.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  // Same as above for the MSR direction.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  // Encode a PSTATE field name; validity was checked by
  // isSystemPStateField().
  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    uint32_t Bits =
        AArch64PState::PStateMapper().fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }
01483 
  // System control register index (the Cn/Cm of SYS/SYSL instructions).
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }

  // PRFM prefetch operation, stored as its 5-bit encoding.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }
01493 
  // Pack shift type + amount into the single shifter immediate field.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Arithmetic extend for 32-bit forms; a bare LSL means UXTW here.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Arithmetic extend for 64-bit forms; a bare LSL means UXTX here.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Register-offset memory extend: emits a signed? flag followed by a
  // shifted? flag (any nonzero amount counts as shifted).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
  }
01524 
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    // Explicit "#0" vs. no amount at all is what distinguishes the forms.
    Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
  }
01536 
  // MOV-alias support: extract the 16-bit chunk at the given shift from
  // the full immediate for the underlying MOVZ instruction.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }

  // Same for MOVN: the chunk comes from the complemented immediate.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }

  // Debug dump; defined out-of-line after the class.
  void print(raw_ostream &OS) const override;
01556 
  // Factory: a token operand referencing (not owning) Str's storage;
  // IsSuffix marks mnemonic suffixes like ".4s".
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory: a (possibly vector) register operand.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory: a vector register list starting at RegNum with Count
  // registers; ElementKind/NumElements describe an explicit type suffix
  // (0 when implicit).
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory: a vector lane index.
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory: an immediate expression operand.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
01608 
01609   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
01610                                                           unsigned ShiftAmount,
01611                                                           SMLoc S, SMLoc E,
01612                                                           MCContext &Ctx) {
01613     auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
01614     Op->ShiftedImm .Val = Val;
01615     Op->ShiftedImm.ShiftAmount = ShiftAmount;
01616     Op->StartLoc = S;
01617     Op->EndLoc = E;
01618     return Op;
01619   }
01620 
  // Factory: a condition-code operand.
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory: an FP immediate, pre-encoded as its 8-bit FMOV value.
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
                                                     MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory: a barrier option (DSB/DMB/ISB CRm value).
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory: a system-register name; references (does not own) Str's
  // storage and records the feature bits used for name lookup.
  static std::unique_ptr<AArch64Operand>
  CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.FeatureBits = FeatureBits;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory: a system control register index (Cn/Cm).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory: a PRFM prefetch operation encoding.
  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
                                                        MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory: a shift/extend modifier; HasExplicitAmount records whether
  // the amount was written in the source (see addMemExtend8Operands).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
01688 };
01689 
01690 } // end anonymous namespace.
01691 
/// Print a human-readable description of this operand for debugging.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Print both the raw 8-bit encoding and its decoded FP value.
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    // Use the symbolic barrier name when the value maps to one.
    bool Valid;
    StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
    if (Valid)
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    // A vector list is printed as its consecutive register numbers.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    // Use the symbolic prefetch-op name when the value maps to one.
    bool Valid;
    StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
    if (Valid)
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // Mark amounts that were implicit rather than written in the source.
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}
01762 
01763 /// @name Auto-generated Match Functions
01764 /// {
01765 
01766 static unsigned MatchRegisterName(StringRef Name);
01767 
01768 /// }
01769 
/// Map a vector register name ("v0".."v31") to the corresponding Q-register
/// enum value, or return 0 if the name is not a vector register.
static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name)
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
01806 
01807 static bool isValidVectorKind(StringRef Name) {
01808   return StringSwitch<bool>(Name.lower())
01809       .Case(".8b", true)
01810       .Case(".16b", true)
01811       .Case(".4h", true)
01812       .Case(".8h", true)
01813       .Case(".2s", true)
01814       .Case(".4s", true)
01815       .Case(".1d", true)
01816       .Case(".2d", true)
01817       .Case(".1q", true)
01818       // Accept the width neutral ones, too, for verbose syntax. If those
01819       // aren't used in the right places, the token operand won't match so
01820       // all will work out.
01821       .Case(".b", true)
01822       .Case(".h", true)
01823       .Case(".s", true)
01824       .Case(".d", true)
01825       .Default(false);
01826 }
01827 
01828 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
01829                                  char &ElementKind) {
01830   assert(isValidVectorKind(Name));
01831 
01832   ElementKind = Name.lower()[Name.size() - 1];
01833   NumElements = 0;
01834 
01835   if (Name.size() == 2)
01836     return;
01837 
01838   // Parse the lane count
01839   Name = Name.drop_front();
01840   while (isdigit(Name.front())) {
01841     NumElements = 10 * NumElements + (Name.front() - '0');
01842     Name = Name.drop_front();
01843   }
01844 }
01845 
01846 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
01847                                      SMLoc &EndLoc) {
01848   StartLoc = getLoc();
01849   RegNo = tryParseRegister();
01850   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
01851   return (RegNo == (unsigned)-1);
01852 }
01853 
01854 // Matches a register name or register alias previously defined by '.req'
01855 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
01856                                                   bool isVector) {
01857   unsigned RegNum = isVector ? matchVectorRegName(Name)
01858                              : MatchRegisterName(Name);
01859 
01860   if (RegNum == 0) {
01861     // Check for aliases registered via .req. Canonicalize to lower case.
01862     // That's more consistent since register names are case insensitive, and
01863     // it's how the original entry was passed in from MC/MCParser/AsmParser.
01864     auto Entry = RegisterReqs.find(Name.lower());
01865     if (Entry == RegisterReqs.end())
01866       return 0;
01867     // set RegNum if the match is the right kind of register
01868     if (isVector == Entry->getValue().first)
01869       RegNum = Entry->getValue().second;
01870   }
01871   return RegNum;
01872 }
01873 
01874 /// tryParseRegister - Try to parse a register name. The token must be an
01875 /// Identifier when called, and if it is a register name the token is eaten and
01876 /// the register is added to the operand list.
01877 int AArch64AsmParser::tryParseRegister() {
01878   const AsmToken &Tok = Parser.getTok();
01879   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
01880 
01881   std::string lowerCase = Tok.getString().lower();
01882   unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
01883   // Also handle a few aliases of registers.
01884   if (RegNum == 0)
01885     RegNum = StringSwitch<unsigned>(lowerCase)
01886                  .Case("fp",  AArch64::FP)
01887                  .Case("lr",  AArch64::LR)
01888                  .Case("x31", AArch64::XZR)
01889                  .Case("w31", AArch64::WZR)
01890                  .Default(0);
01891 
01892   if (RegNum == 0)
01893     return -1;
01894 
01895   Parser.Lex(); // Eat identifier token.
01896   return RegNum;
01897 }
01898 
01899 /// tryMatchVectorRegister - Try to parse a vector register name with optional
01900 /// kind specifier. If it is a register specifier, eat the token and return it.
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    TokError("vector register expected");
    return -1;
  }

  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, true);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Everything from the '.' onwards is the arrangement specifier
      // (".8b", ".4s", ...); hand it back to the caller via Kind.
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind)) {
        TokError("invalid vector kind qualifier");
        return -1;
      }
    }
    Parser.Lex(); // Eat the register token.
    return RegNum;
  }

  // Only report an error when the caller required a vector register;
  // otherwise fail quietly so other parses can be attempted.
  if (expected)
    TokError("vector register expected");
  return -1;
}
01930 
01931 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
01932 AArch64AsmParser::OperandMatchResultTy
01933 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
01934   SMLoc S = getLoc();
01935 
01936   if (Parser.getTok().isNot(AsmToken::Identifier)) {
01937     Error(S, "Expected cN operand where 0 <= N <= 15");
01938     return MatchOperand_ParseFail;
01939   }
01940 
01941   StringRef Tok = Parser.getTok().getIdentifier();
01942   if (Tok[0] != 'c' && Tok[0] != 'C') {
01943     Error(S, "Expected cN operand where 0 <= N <= 15");
01944     return MatchOperand_ParseFail;
01945   }
01946 
01947   uint32_t CRNum;
01948   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
01949   if (BadNum || CRNum > 15) {
01950     Error(S, "Expected cN operand where 0 <= N <= 15");
01951     return MatchOperand_ParseFail;
01952   }
01953 
01954   Parser.Lex(); // Eat identifier token.
01955   Operands.push_back(
01956       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
01957   return MatchOperand_Success;
01958 }
01959 
01960 /// tryParsePrefetch - Try to parse a prefetch operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    if (Hash)
      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // The immediate must fold to a constant; a symbolic expression is not
    // meaningful as a prefetch operation.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    // The prefetch operation is a 5-bit field, so values above 31 cannot
    // be encoded.
    if (prfop > 31) {
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  // Translate a named prefetch hint into its numeric encoding.
  bool Valid;
  unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
  return MatchOperand_Success;
}
02005 
02006 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
02007 /// instruction.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // Accept an optional leading '#' before the label expression.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Check that any modifier on the symbol reference is one that makes sense
  // for ADRP; rewrap the bare-symbol case with the ELF @page variant.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
02055 
02056 /// tryParseAdrLabel - Parse and validate a source label for the ADR
02057 /// instruction.
02058 AArch64AsmParser::OperandMatchResultTy
02059 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
02060   SMLoc S = getLoc();
02061   const MCExpr *Expr;
02062 
02063   if (Parser.getTok().is(AsmToken::Hash)) {
02064     Parser.Lex(); // Eat hash token.
02065   }
02066 
02067   if (getParser().parseExpression(Expr))
02068     return MatchOperand_ParseFail;
02069 
02070   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
02071   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
02072 
02073   return MatchOperand_Success;
02074 }
02075 
02076 /// tryParseFPImm - A floating point immediate expression operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    // Reinterpret the double's bits and encode it as an 8-bit FP immediate.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    // A non-negated hexadecimal literal is taken as the raw 8-bit encoding;
    // any other integer is reparsed as a floating-point value and encoded.
    if (!isNegative && Tok.getString().startswith("0x")) {
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' this may simply be some other operand kind.
  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
02137 
02138 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift follows. If the constant is too large for the 12-bit
    // immediate field but is a multiple of 0x1000, fold an implicit 'lsl #12'
    // into the operand.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::Create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
02203 
02204 /// parseCondCodeString - Parse a Condition Code string.
02205 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
02206   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
02207                     .Case("eq", AArch64CC::EQ)
02208                     .Case("ne", AArch64CC::NE)
02209                     .Case("cs", AArch64CC::HS)
02210                     .Case("hs", AArch64CC::HS)
02211                     .Case("cc", AArch64CC::LO)
02212                     .Case("lo", AArch64CC::LO)
02213                     .Case("mi", AArch64CC::MI)
02214                     .Case("pl", AArch64CC::PL)
02215                     .Case("vs", AArch64CC::VS)
02216                     .Case("vc", AArch64CC::VC)
02217                     .Case("hi", AArch64CC::HI)
02218                     .Case("ls", AArch64CC::LS)
02219                     .Case("ge", AArch64CC::GE)
02220                     .Case("lt", AArch64CC::LT)
02221                     .Case("gt", AArch64CC::GT)
02222                     .Case("le", AArch64CC::LE)
02223                     .Case("al", AArch64CC::AL)
02224                     .Case("nv", AArch64CC::NV)
02225                     .Default(AArch64CC::Invalid);
02226   return CC;
02227 }
02228 
02229 /// parseCondCode - Parse a Condition Code operand.
02230 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
02231                                      bool invertCondCode) {
02232   SMLoc S = getLoc();
02233   const AsmToken &Tok = Parser.getTok();
02234   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
02235 
02236   StringRef Cond = Tok.getString();
02237   AArch64CC::CondCode CC = parseCondCodeString(Cond);
02238   if (CC == AArch64CC::Invalid)
02239     return TokError("invalid condition code");
02240   Parser.Lex(); // Eat identifier token.
02241 
02242   if (invertCondCode) {
02243     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
02244       return TokError("condition codes AL and NV are invalid for this instruction");
02245     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
02246   }
02247 
02248   Operands.push_back(
02249       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
02250   return false;
02251 }
02252 
02253 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
02254 /// them if present.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  // Accept the shift/extend mnemonic case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // No amount follows. Shift operations require one; extends do not.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    TokError("expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
02324 
02325 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
02326 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
02327 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
02328                                    OperandVector &Operands) {
02329   if (Name.find('.') != StringRef::npos)
02330     return TokError("invalid operand");
02331 
02332   Mnemonic = Name;
02333   Operands.push_back(
02334       AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
02335 
02336   const AsmToken &Tok = Parser.getTok();
02337   StringRef Op = Tok.getString();
02338   SMLoc S = Tok.getLoc();
02339 
02340   const MCExpr *Expr = nullptr;
02341 
02342 #define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
02343   do {                                                                         \
02344     Expr = MCConstantExpr::Create(op1, getContext());                          \
02345     Operands.push_back(                                                        \
02346         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
02347     Operands.push_back(                                                        \
02348         AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
02349     Operands.push_back(                                                        \
02350         AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
02351     Expr = MCConstantExpr::Create(op2, getContext());                          \
02352     Operands.push_back(                                                        \
02353         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
02354   } while (0)
02355 
02356   if (Mnemonic == "ic") {
02357     if (!Op.compare_lower("ialluis")) {
02358       // SYS #0, C7, C1, #0
02359       SYS_ALIAS(0, 7, 1, 0);
02360     } else if (!Op.compare_lower("iallu")) {
02361       // SYS #0, C7, C5, #0
02362       SYS_ALIAS(0, 7, 5, 0);
02363     } else if (!Op.compare_lower("ivau")) {
02364       // SYS #3, C7, C5, #1
02365       SYS_ALIAS(3, 7, 5, 1);
02366     } else {
02367       return TokError("invalid operand for IC instruction");
02368     }
02369   } else if (Mnemonic == "dc") {
02370     if (!Op.compare_lower("zva")) {
02371       // SYS #3, C7, C4, #1
02372       SYS_ALIAS(3, 7, 4, 1);
02373     } else if (!Op.compare_lower("ivac")) {
02374       // SYS #3, C7, C6, #1
02375       SYS_ALIAS(0, 7, 6, 1);
02376     } else if (!Op.compare_lower("isw")) {
02377       // SYS #0, C7, C6, #2
02378       SYS_ALIAS(0, 7, 6, 2);
02379     } else if (!Op.compare_lower("cvac")) {
02380       // SYS #3, C7, C10, #1
02381       SYS_ALIAS(3, 7, 10, 1);
02382     } else if (!Op.compare_lower("csw")) {
02383       // SYS #0, C7, C10, #2
02384       SYS_ALIAS(0, 7, 10, 2);
02385     } else if (!Op.compare_lower("cvau")) {
02386       // SYS #3, C7, C11, #1
02387       SYS_ALIAS(3, 7, 11, 1);
02388     } else if (!Op.compare_lower("civac")) {
02389       // SYS #3, C7, C14, #1
02390       SYS_ALIAS(3, 7, 14, 1);
02391     } else if (!Op.compare_lower("cisw")) {
02392       // SYS #0, C7, C14, #2
02393       SYS_ALIAS(0, 7, 14, 2);
02394     } else {
02395       return TokError("invalid operand for DC instruction");
02396     }
02397   } else if (Mnemonic == "at") {
02398     if (!Op.compare_lower("s1e1r")) {
02399       // SYS #0, C7, C8, #0
02400       SYS_ALIAS(0, 7, 8, 0);
02401     } else if (!Op.compare_lower("s1e2r")) {
02402       // SYS #4, C7, C8, #0
02403       SYS_ALIAS(4, 7, 8, 0);
02404     } else if (!Op.compare_lower("s1e3r")) {
02405       // SYS #6, C7, C8, #0
02406       SYS_ALIAS(6, 7, 8, 0);
02407     } else if (!Op.compare_lower("s1e1w")) {
02408       // SYS #0, C7, C8, #1
02409       SYS_ALIAS(0, 7, 8, 1);
02410     } else if (!Op.compare_lower("s1e2w")) {
02411       // SYS #4, C7, C8, #1
02412       SYS_ALIAS(4, 7, 8, 1);
02413     } else if (!Op.compare_lower("s1e3w")) {
02414       // SYS #6, C7, C8, #1
02415       SYS_ALIAS(6, 7, 8, 1);
02416     } else if (!Op.compare_lower("s1e0r")) {
02417       // SYS #0, C7, C8, #3
02418       SYS_ALIAS(0, 7, 8, 2);
02419     } else if (!Op.compare_lower("s1e0w")) {
02420       // SYS #0, C7, C8, #3
02421       SYS_ALIAS(0, 7, 8, 3);
02422     } else if (!Op.compare_lower("s12e1r")) {
02423       // SYS #4, C7, C8, #4
02424       SYS_ALIAS(4, 7, 8, 4);
02425     } else if (!Op.compare_lower("s12e1w")) {
02426       // SYS #4, C7, C8, #5
02427       SYS_ALIAS(4, 7, 8, 5);
02428     } else if (!Op.compare_lower("s12e0r")) {
02429       // SYS #4, C7, C8, #6
02430       SYS_ALIAS(4, 7, 8, 6);
02431     } else if (!Op.compare_lower("s12e0w")) {
02432       // SYS #4, C7, C8, #7
02433       SYS_ALIAS(4, 7, 8, 7);
02434     } else {
02435       return TokError("invalid operand for AT instruction");
02436     }
02437   } else if (Mnemonic == "tlbi") {
02438     if (!Op.compare_lower("vmalle1is")) {
02439       // SYS #0, C8, C3, #0
02440       SYS_ALIAS(0, 8, 3, 0);
02441     } else if (!Op.compare_lower("alle2is")) {
02442       // SYS #4, C8, C3, #0
02443       SYS_ALIAS(4, 8, 3, 0);
02444     } else if (!Op.compare_lower("alle3is")) {
02445       // SYS #6, C8, C3, #0
02446       SYS_ALIAS(6, 8, 3, 0);
02447     } else if (!Op.compare_lower("vae1is")) {
02448       // SYS #0, C8, C3, #1
02449       SYS_ALIAS(0, 8, 3, 1);
02450     } else if (!Op.compare_lower("vae2is")) {
02451       // SYS #4, C8, C3, #1
02452       SYS_ALIAS(4, 8, 3, 1);
02453     } else if (!Op.compare_lower("vae3is")) {
02454       // SYS #6, C8, C3, #1
02455       SYS_ALIAS(6, 8, 3, 1);
02456     } else if (!Op.compare_lower("aside1is")) {
02457       // SYS #0, C8, C3, #2
02458       SYS_ALIAS(0, 8, 3, 2);
02459     } else if (!Op.compare_lower("vaae1is")) {
02460       // SYS #0, C8, C3, #3
02461       SYS_ALIAS(0, 8, 3, 3);
02462     } else if (!Op.compare_lower("alle1is")) {
02463       // SYS #4, C8, C3, #4
02464       SYS_ALIAS(4, 8, 3, 4);
02465     } else if (!Op.compare_lower("vale1is")) {
02466       // SYS #0, C8, C3, #5
02467       SYS_ALIAS(0, 8, 3, 5);
02468     } else if (!Op.compare_lower("vaale1is")) {
02469       // SYS #0, C8, C3, #7
02470       SYS_ALIAS(0, 8, 3, 7);
02471     } else if (!Op.compare_lower("vmalle1")) {
02472       // SYS #0, C8, C7, #0
02473       SYS_ALIAS(0, 8, 7, 0);
02474     } else if (!Op.compare_lower("alle2")) {
02475       // SYS #4, C8, C7, #0
02476       SYS_ALIAS(4, 8, 7, 0);
02477     } else if (!Op.compare_lower("vale2is")) {
02478       // SYS #4, C8, C3, #5
02479       SYS_ALIAS(4, 8, 3, 5);
02480     } else if (!Op.compare_lower("vale3is")) {
02481       // SYS #6, C8, C3, #5
02482       SYS_ALIAS(6, 8, 3, 5);
02483     } else if (!Op.compare_lower("alle3")) {
02484       // SYS #6, C8, C7, #0
02485       SYS_ALIAS(6, 8, 7, 0);
02486     } else if (!Op.compare_lower("vae1")) {
02487       // SYS #0, C8, C7, #1
02488       SYS_ALIAS(0, 8, 7, 1);
02489     } else if (!Op.compare_lower("vae2")) {
02490       // SYS #4, C8, C7, #1
02491       SYS_ALIAS(4, 8, 7, 1);
02492     } else if (!Op.compare_lower("vae3")) {
02493       // SYS #6, C8, C7, #1
02494       SYS_ALIAS(6, 8, 7, 1);
02495     } else if (!Op.compare_lower("aside1")) {
02496       // SYS #0, C8, C7, #2
02497       SYS_ALIAS(0, 8, 7, 2);
02498     } else if (!Op.compare_lower("vaae1")) {
02499       // SYS #0, C8, C7, #3
02500       SYS_ALIAS(0, 8, 7, 3);
02501     } else if (!Op.compare_lower("alle1")) {
02502       // SYS #4, C8, C7, #4
02503       SYS_ALIAS(4, 8, 7, 4);
02504     } else if (!Op.compare_lower("vale1")) {
02505       // SYS #0, C8, C7, #5
02506       SYS_ALIAS(0, 8, 7, 5);
02507     } else if (!Op.compare_lower("vale2")) {
02508       // SYS #4, C8, C7, #5
02509       SYS_ALIAS(4, 8, 7, 5);
02510     } else if (!Op.compare_lower("vale3")) {
02511       // SYS #6, C8, C7, #5
02512       SYS_ALIAS(6, 8, 7, 5);
02513     } else if (!Op.compare_lower("vaale1")) {
02514       // SYS #0, C8, C7, #7
02515       SYS_ALIAS(0, 8, 7, 7);
02516     } else if (!Op.compare_lower("ipas2e1")) {
02517       // SYS #4, C8, C4, #1
02518       SYS_ALIAS(4, 8, 4, 1);
02519     } else if (!Op.compare_lower("ipas2le1")) {
02520       // SYS #4, C8, C4, #5
02521       SYS_ALIAS(4, 8, 4, 5);
02522     } else if (!Op.compare_lower("ipas2e1is")) {
02523       // SYS #4, C8, C0, #1
02524       SYS_ALIAS(4, 8, 0, 1);
02525     } else if (!Op.compare_lower("ipas2le1is")) {
02526       // SYS #4, C8, C0, #5
02527       SYS_ALIAS(4, 8, 0, 5);
02528     } else if (!Op.compare_lower("vmalls12e1")) {
02529       // SYS #4, C8, C7, #6
02530       SYS_ALIAS(4, 8, 7, 6);
02531     } else if (!Op.compare_lower("vmalls12e1is")) {
02532       // SYS #4, C8, C3, #6
02533       SYS_ALIAS(4, 8, 3, 6);
02534     } else {
02535       return TokError("invalid operand for TLBI instruction");
02536     }
02537   }
02538 
02539 #undef SYS_ALIAS
02540 
02541   Parser.Lex(); // Eat operand.
02542 
02543   bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
02544   bool HasRegister = false;
02545 
02546   // Check for the optional register operand.
02547   if (getLexer().is(AsmToken::Comma)) {
02548     Parser.Lex(); // Eat comma.
02549 
02550     if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
02551       return TokError("expected register operand");
02552 
02553     HasRegister = true;
02554   }
02555 
02556   if (getLexer().isNot(AsmToken::EndOfStatement)) {
02557     Parser.eatToEndOfStatement();
02558     return TokError("unexpected token in argument list");
02559   }
02560 
02561   if (ExpectRegister && !HasRegister) {
02562     return TokError("specified " + Mnemonic + " op requires a register");
02563   }
02564   else if (!ExpectRegister && HasRegister) {
02565     return TokError("specified " + Mnemonic + " op does not use a register");
02566   }
02567 
02568   Parser.Lex(); // Consume the EndOfStatement
02569   return false;
02570 }
02571 
02572 AArch64AsmParser::OperandMatchResultTy
02573 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
02574   const AsmToken &Tok = Parser.getTok();
02575 
02576   // Can be either a #imm style literal or an option name
02577   bool Hash = Tok.is(AsmToken::Hash);
02578   if (Hash || Tok.is(AsmToken::Integer)) {
02579     // Immediate operand.
02580     if (Hash)
02581       Parser.Lex(); // Eat the '#'
02582     const MCExpr *ImmVal;
02583     SMLoc ExprLoc = getLoc();
02584     if (getParser().parseExpression(ImmVal))
02585       return MatchOperand_ParseFail;
02586     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
02587     if (!MCE) {
02588       Error(ExprLoc, "immediate value expected for barrier operand");
02589       return MatchOperand_ParseFail;
02590     }
02591     if (MCE->getValue() < 0 || MCE->getValue() > 15) {
02592       Error(ExprLoc, "barrier operand out of range");
02593       return MatchOperand_ParseFail;
02594     }
02595     Operands.push_back(
02596         AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
02597     return MatchOperand_Success;
02598   }
02599 
02600   if (Tok.isNot(AsmToken::Identifier)) {
02601     TokError("invalid operand for instruction");
02602     return MatchOperand_ParseFail;
02603   }
02604 
02605   bool Valid;
02606   unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
02607   if (!Valid) {
02608     TokError("invalid barrier option name");
02609     return MatchOperand_ParseFail;
02610   }
02611 
02612   // The only valid named option for ISB is 'sy'
02613   if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
02614     TokError("'sy' or #imm operand expected");
02615     return MatchOperand_ParseFail;
02616   }
02617 
02618   Operands.push_back(
02619       AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
02620   Parser.Lex(); // Consume the option
02621 
02622   return MatchOperand_Success;
02623 }
02624 
02625 AArch64AsmParser::OperandMatchResultTy
02626 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
02627   const AsmToken &Tok = Parser.getTok();
02628 
02629   if (Tok.isNot(AsmToken::Identifier))
02630     return MatchOperand_NoMatch;
02631 
02632   Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
02633                      STI.getFeatureBits(), getContext()));
02634   Parser.Lex(); // Eat identifier
02635 
02636   return MatchOperand_Success;
02637 }
02638 
/// tryParseVectorRegister - Parse a vector register operand.
///
/// Returns true when no vector register starts at the current token.
/// Returns false once the register operand has been pushed onto \p Operands;
/// note that the malformed-index paths below emit a diagnostic and *still*
/// return false, because the register operand itself was already created.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      // Non-constant index: diagnose, but the register is already pushed.
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
02686 
02687 /// parseRegister - Parse a non-vector register operand.
02688 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
02689   SMLoc S = getLoc();
02690   // Try for a vector register.
02691   if (!tryParseVectorRegister(Operands))
02692     return false;
02693 
02694   // Try for a scalar register.
02695   int64_t Reg = tryParseRegister();
02696   if (Reg == -1)
02697     return true;
02698   Operands.push_back(
02699       AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
02700 
02701   // A small number of instructions (FMOVXDhighr, for example) have "[1]"
02702   // as a string token in the instruction itself.
02703   if (getLexer().getKind() == AsmToken::LBrac) {
02704     SMLoc LBracS = getLoc();
02705     Parser.Lex();
02706     const AsmToken &Tok = Parser.getTok();
02707     if (Tok.is(AsmToken::Integer)) {
02708       SMLoc IntS = getLoc();
02709       int64_t Val = Tok.getIntVal();
02710       if (Val == 1) {
02711         Parser.Lex();
02712         if (getLexer().getKind() == AsmToken::RBrac) {
02713           SMLoc RBracS = getLoc();
02714           Parser.Lex();
02715           Operands.push_back(
02716               AArch64Operand::CreateToken("[", false, LBracS, getContext()));
02717           Operands.push_back(
02718               AArch64Operand::CreateToken("1", false, IntS, getContext()));
02719           Operands.push_back(
02720               AArch64Operand::CreateToken("]", false, RBracS, getContext()));
02721           return false;
02722         }
02723       }
02724     }
02725   }
02726 
02727   return false;
02728 }
02729 
02730 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
02731   bool HasELFModifier = false;
02732   AArch64MCExpr::VariantKind RefKind;
02733 
02734   if (Parser.getTok().is(AsmToken::Colon)) {
02735     Parser.Lex(); // Eat ':"
02736     HasELFModifier = true;
02737 
02738     if (Parser.getTok().isNot(AsmToken::Identifier)) {
02739       Error(Parser.getTok().getLoc(),
02740             "expect relocation specifier in operand after ':'");
02741       return true;
02742     }
02743 
02744     std::string LowerCase = Parser.getTok().getIdentifier().lower();
02745     RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
02746                   .Case("lo12", AArch64MCExpr::VK_LO12)
02747                   .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
02748                   .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
02749                   .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
02750                   .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
02751                   .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
02752                   .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
02753                   .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
02754                   .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
02755                   .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
02756                   .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
02757                   .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
02758                   .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
02759                   .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
02760                   .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
02761                   .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
02762                   .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
02763                   .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
02764                   .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
02765                   .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
02766                   .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
02767                   .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
02768                   .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
02769                   .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
02770                   .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
02771                   .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
02772                   .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
02773                   .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
02774                   .Case("got", AArch64MCExpr::VK_GOT_PAGE)
02775                   .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
02776                   .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
02777                   .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
02778                   .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
02779                   .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
02780                   .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
02781                   .Default(AArch64MCExpr::VK_INVALID);
02782 
02783     if (RefKind == AArch64MCExpr::VK_INVALID) {
02784       Error(Parser.getTok().getLoc(),
02785             "expect relocation specifier in operand after ':'");
02786       return true;
02787     }
02788 
02789     Parser.Lex(); // Eat identifier
02790 
02791     if (Parser.getTok().isNot(AsmToken::Colon)) {
02792       Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
02793       return true;
02794     }
02795     Parser.Lex(); // Eat ':'
02796   }
02797 
02798   if (getParser().parseExpression(ImmVal))
02799     return true;
02800 
02801   if (HasELFModifier)
02802     ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
02803 
02804   return false;
02805 }
02806 
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts "{ vA.T, vB.T, ... }" (sequential registers, wrapping at v31)
/// or the range form "{ vA.T - vB.T }", optionally followed by a lane
/// index "[imm]". Returns true on a hard parse error; the malformed-index
/// paths at the end diagnose but return false, since the vector-list
/// operand has already been pushed.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    // Range form: "{ vA.T - vB.T }".
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from first to last register, wrapping modulo 32
    // (e.g. "{ v30.4s - v1.4s }" spans v30, v31, v0, v1).
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    // Comma-separated form: each register must directly follow the previous.
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
       return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  // AdvSIMD register lists hold at most four registers.
  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      // Diagnose but return false: the list operand is already on Operands.
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
02904 
02905 AArch64AsmParser::OperandMatchResultTy
02906 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
02907   const AsmToken &Tok = Parser.getTok();
02908   if (!Tok.is(AsmToken::Identifier))
02909     return MatchOperand_NoMatch;
02910 
02911   unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
02912 
02913   MCContext &Ctx = getContext();
02914   const MCRegisterInfo *RI = Ctx.getRegisterInfo();
02915   if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
02916     return MatchOperand_NoMatch;
02917 
02918   SMLoc S = getLoc();
02919   Parser.Lex(); // Eat register
02920 
02921   if (Parser.getTok().isNot(AsmToken::Comma)) {
02922     Operands.push_back(
02923         AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
02924     return MatchOperand_Success;
02925   }
02926   Parser.Lex(); // Eat comma.
02927 
02928   if (Parser.getTok().is(AsmToken::Hash))
02929     Parser.Lex(); // Eat hash
02930 
02931   if (Parser.getTok().isNot(AsmToken::Integer)) {
02932     Error(getLoc(), "index must be absent or #0");
02933     return MatchOperand_ParseFail;
02934   }
02935 
02936   const MCExpr *ImmVal;
02937   if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
02938       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
02939     Error(getLoc(), "index must be absent or #0");
02940     return MatchOperand_ParseFail;
02941   }
02942 
02943   Operands.push_back(
02944       AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
02945   return MatchOperand_Success;
02946 }
02947 
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// \param isCondCode      true when this operand position must be a
///                        condition code (e.g. the final operand of ccmp).
/// \param invertCondCode  invert the parsed condition code (for the cset/
///                        cinc-family aliases).
/// Returns true on error, false on success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else: try a (possibly relocation-prefixed) expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // '[' starts a memory operand; emit the bracket as a literal token.
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    // '{' starts an AdvSIMD vector register list.
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();
    if (getLexer().is(AsmToken::Hash))
      Parser.Lex();

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the tablegen asm
      // string for the comparison-with-zero forms.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "=expr" is the ldr pseudo-instruction's constant-pool operand.
    SMLoc Loc = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(Loc, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    // The destination register (operand 1) must already have been parsed.
    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isReg())
      return true;

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    // NOTE(review): S is never assigned in this case, so the immediates
    // below get a default-constructed start location — confirm intended.
    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Normalize to a 16-bit chunk plus an LSL #16*n shift.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
          Operands.push_back(AArch64Operand::CreateImm(
                     MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
03111 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Handles: rewriting pre-UAL branch spellings (beq -> b.eq), the ".req"
/// directive, the ic/dc/at/tlbi SYS aliases, splitting the mnemonic on '.'
/// into tokens, and parsing the comma-separated operand list (including the
/// condition-code positions of the conditional compare/select families).
/// Returns true on error, false on success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize legacy one-token conditional branches to "b.<cc>".
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand. The condition-code position depends
      // on the mnemonic family determined above.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
03264 
03265 // FIXME: This entire function is a giant hack to provide us with decent
03266 // operand range validation/diagnostics until TableGen/MC can be extended
03267 // to support autogeneration of this kind of validation.
03268 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
03269                                          SmallVectorImpl<SMLoc> &Loc) {
03270   const MCRegisterInfo *RI = getContext().getRegisterInfo();
03271   // Check for indexed addressing modes w/ the base register being the
03272   // same as a destination/source register or pair load where
03273   // the Rt == Rt2. All of those are undefined behaviour.
03274   switch (Inst.getOpcode()) {
03275   case AArch64::LDPSWpre:
03276   case AArch64::LDPWpost:
03277   case AArch64::LDPWpre:
03278   case AArch64::LDPXpost:
03279   case AArch64::LDPXpre: {
03280     unsigned Rt = Inst.getOperand(1).getReg();
03281     unsigned Rt2 = Inst.getOperand(2).getReg();
03282     unsigned Rn = Inst.getOperand(3).getReg();
03283     if (RI->isSubRegisterEq(Rn, Rt))
03284       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
03285                            "is also a destination");
03286     if (RI->isSubRegisterEq(Rn, Rt2))
03287       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
03288                            "is also a destination");
03289     // FALLTHROUGH
03290   }
03291   case AArch64::LDPDi:
03292   case AArch64::LDPQi:
03293   case AArch64::LDPSi:
03294   case AArch64::LDPSWi:
03295   case AArch64::LDPWi:
03296   case AArch64::LDPXi: {
03297     unsigned Rt = Inst.getOperand(0).getReg();
03298     unsigned Rt2 = Inst.getOperand(1).getReg();
03299     if (Rt == Rt2)
03300       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
03301     break;
03302   }
03303   case AArch64::LDPDpost:
03304   case AArch64::LDPDpre:
03305   case AArch64::LDPQpost:
03306   case AArch64::LDPQpre:
03307   case AArch64::LDPSpost:
03308   case AArch64::LDPSpre:
03309   case AArch64::LDPSWpost: {
03310     unsigned Rt = Inst.getOperand(1).getReg();
03311     unsigned Rt2 = Inst.getOperand(2).getReg();
03312     if (Rt == Rt2)
03313       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
03314     break;
03315   }
03316   case AArch64::STPDpost:
03317   case AArch64::STPDpre:
03318   case AArch64::STPQpost:
03319   case AArch64::STPQpre:
03320   case AArch64::STPSpost:
03321   case AArch64::STPSpre:
03322   case AArch64::STPWpost:
03323   case AArch64::STPWpre:
03324   case AArch64::STPXpost:
03325   case AArch64::STPXpre: {
03326     unsigned Rt = Inst.getOperand(1).getReg();
03327     unsigned Rt2 = Inst.getOperand(2).getReg();
03328     unsigned Rn = Inst.getOperand(3).getReg();
03329     if (RI->isSubRegisterEq(Rn, Rt))
03330       return Error(Loc[0], "unpredictable STP instruction, writeback base "
03331                            "is also a source");
03332     if (RI->isSubRegisterEq(Rn, Rt2))
03333       return Error(Loc[1], "unpredictable STP instruction, writeback base "
03334                            "is also a source");
03335     break;
03336   }
03337   case AArch64::LDRBBpre:
03338   case AArch64::LDRBpre:
03339   case AArch64::LDRHHpre:
03340   case AArch64::LDRHpre:
03341   case AArch64::LDRSBWpre:
03342   case AArch64::LDRSBXpre:
03343   case AArch64::LDRSHWpre:
03344   case AArch64::LDRSHXpre:
03345   case AArch64::LDRSWpre:
03346   case AArch64::LDRWpre:
03347   case AArch64::LDRXpre:
03348   case AArch64::LDRBBpost:
03349   case AArch64::LDRBpost:
03350   case AArch64::LDRHHpost:
03351   case AArch64::LDRHpost:
03352   case AArch64::LDRSBWpost:
03353   case AArch64::LDRSBXpost:
03354   case AArch64::LDRSHWpost:
03355   case AArch64::LDRSHXpost:
03356   case AArch64::LDRSWpost:
03357   case AArch64::LDRWpost:
03358   case AArch64::LDRXpost: {
03359     unsigned Rt = Inst.getOperand(1).getReg();
03360     unsigned Rn = Inst.getOperand(2).getReg();
03361     if (RI->isSubRegisterEq(Rn, Rt))
03362       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
03363                            "is also a source");
03364     break;
03365   }
03366   case AArch64::STRBBpost:
03367   case AArch64::STRBpost:
03368   case AArch64::STRHHpost:
03369   case AArch64::STRHpost:
03370   case AArch64::STRWpost:
03371   case AArch64::STRXpost:
03372   case AArch64::STRBBpre:
03373   case AArch64::STRBpre:
03374   case AArch64::STRHHpre:
03375   case AArch64::STRHpre:
03376   case AArch64::STRWpre:
03377   case AArch64::STRXpre: {
03378     unsigned Rt = Inst.getOperand(1).getReg();
03379     unsigned Rn = Inst.getOperand(2).getReg();
03380     if (RI->isSubRegisterEq(Rn, Rt))
03381       return Error(Loc[0], "unpredictable STR instruction, writeback base "
03382                            "is also a source");
03383     break;
03384   }
03385   }
03386 
03387   // Now check immediate ranges. Separate from the above as there is overlap
03388   // in the instructions being checked and this keeps the nested conditionals
03389   // to a minimum.
03390   switch (Inst.getOpcode()) {
03391   case AArch64::ADDSWri:
03392   case AArch64::ADDSXri:
03393   case AArch64::ADDWri:
03394   case AArch64::ADDXri:
03395   case AArch64::SUBSWri:
03396   case AArch64::SUBSXri:
03397   case AArch64::SUBWri:
03398   case AArch64::SUBXri: {
03399     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
03400     // some slight duplication here.
03401     if (Inst.getOperand(2).isExpr()) {
03402       const MCExpr *Expr = Inst.getOperand(2).getExpr();
03403       AArch64MCExpr::VariantKind ELFRefKind;
03404       MCSymbolRefExpr::VariantKind DarwinRefKind;
03405       int64_t Addend;
03406       if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
03407         return Error(Loc[2], "invalid immediate expression");
03408       }
03409 
03410       // Only allow these with ADDXri.
03411       if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
03412           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
03413           Inst.getOpcode() == AArch64::ADDXri)
03414         return false;
03415 
03416       // Only allow these with ADDXri/ADDWri
03417       if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
03418           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
03419           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
03420           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
03421           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
03422           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
03423           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
03424           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
03425           (Inst.getOpcode() == AArch64::ADDXri ||
03426           Inst.getOpcode() == AArch64::ADDWri))
03427         return false;
03428 
03429       // Don't allow expressions in the immediate field otherwise
03430       return Error(Loc[2], "invalid immediate expression");
03431     }
03432     return false;
03433   }
03434   default:
03435     return false;
03436   }
03437 }
03438 
03439 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
03440   switch (ErrCode) {
03441   case Match_MissingFeature:
03442     return Error(Loc,
03443                  "instruction requires a CPU feature not currently enabled");
03444   case Match_InvalidOperand:
03445     return Error(Loc, "invalid operand for instruction");
03446   case Match_InvalidSuffix:
03447     return Error(Loc, "invalid type suffix for instruction");
03448   case Match_InvalidCondCode:
03449     return Error(Loc, "expected AArch64 condition code");
03450   case Match_AddSubRegExtendSmall:
03451     return Error(Loc,
03452       "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
03453   case Match_AddSubRegExtendLarge:
03454     return Error(Loc,
03455       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
03456   case Match_AddSubSecondSource:
03457     return Error(Loc,
03458       "expected compatible register, symbol or integer in range [0, 4095]");
03459   case Match_LogicalSecondSource:
03460     return Error(Loc, "expected compatible register or logical immediate");
03461   case Match_InvalidMovImm32Shift:
03462     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
03463   case Match_InvalidMovImm64Shift:
03464     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
03465   case Match_AddSubRegShift32:
03466     return Error(Loc,
03467        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
03468   case Match_AddSubRegShift64:
03469     return Error(Loc,
03470        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
03471   case Match_InvalidFPImm:
03472     return Error(Loc,
03473                  "expected compatible register or floating-point constant");
03474   case Match_InvalidMemoryIndexedSImm9:
03475     return Error(Loc, "index must be an integer in range [-256, 255].");
03476   case Match_InvalidMemoryIndexed4SImm7:
03477     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
03478   case Match_InvalidMemoryIndexed8SImm7:
03479     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
03480   case Match_InvalidMemoryIndexed16SImm7:
03481     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
03482   case Match_InvalidMemoryWExtend8:
03483     return Error(Loc,
03484                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
03485   case Match_InvalidMemoryWExtend16:
03486     return Error(Loc,
03487                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
03488   case Match_InvalidMemoryWExtend32:
03489     return Error(Loc,
03490                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
03491   case Match_InvalidMemoryWExtend64:
03492     return Error(Loc,
03493                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
03494   case Match_InvalidMemoryWExtend128:
03495     return Error(Loc,
03496                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
03497   case Match_InvalidMemoryXExtend8:
03498     return Error(Loc,
03499                  "expected 'lsl' or 'sxtx' with optional shift of #0");
03500   case Match_InvalidMemoryXExtend16:
03501     return Error(Loc,
03502                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
03503   case Match_InvalidMemoryXExtend32:
03504     return Error(Loc,
03505                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
03506   case Match_InvalidMemoryXExtend64:
03507     return Error(Loc,
03508                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
03509   case Match_InvalidMemoryXExtend128:
03510     return Error(Loc,
03511                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
03512   case Match_InvalidMemoryIndexed1:
03513     return Error(Loc, "index must be an integer in range [0, 4095].");
03514   case Match_InvalidMemoryIndexed2:
03515     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
03516   case Match_InvalidMemoryIndexed4:
03517     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
03518   case Match_InvalidMemoryIndexed8:
03519     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
03520   case Match_InvalidMemoryIndexed16:
03521     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
03522   case Match_InvalidImm0_7:
03523     return Error(Loc, "immediate must be an integer in range [0, 7].");
03524   case Match_InvalidImm0_15:
03525     return Error(Loc, "immediate must be an integer in range [0, 15].");
03526   case Match_InvalidImm0_31:
03527     return Error(Loc, "immediate must be an integer in range [0, 31].");
03528   case Match_InvalidImm0_63:
03529     return Error(Loc, "immediate must be an integer in range [0, 63].");
03530   case Match_InvalidImm0_127:
03531     return Error(Loc, "immediate must be an integer in range [0, 127].");
03532   case Match_InvalidImm0_65535:
03533     return Error(Loc, "immediate must be an integer in range [0, 65535].");
03534   case Match_InvalidImm1_8:
03535     return Error(Loc, "immediate must be an integer in range [1, 8].");
03536   case Match_InvalidImm1_16:
03537     return Error(Loc, "immediate must be an integer in range [1, 16].");
03538   case Match_InvalidImm1_32:
03539     return Error(Loc, "immediate must be an integer in range [1, 32].");
03540   case Match_InvalidImm1_64:
03541     return Error(Loc, "immediate must be an integer in range [1, 64].");
03542   case Match_InvalidIndex1:
03543     return Error(Loc, "expected lane specifier '[1]'");
03544   case Match_InvalidIndexB:
03545     return Error(Loc, "vector lane must be an integer in range [0, 15].");
03546   case Match_InvalidIndexH:
03547     return Error(Loc, "vector lane must be an integer in range [0, 7].");
03548   case Match_InvalidIndexS:
03549     return Error(Loc, "vector lane must be an integer in range [0, 3].");
03550   case Match_InvalidIndexD:
03551     return Error(Loc, "vector lane must be an integer in range [0, 1].");
03552   case Match_InvalidLabel:
03553     return Error(Loc, "expected label or encodable integer pc offset");
03554   case Match_MRS:
03555     return Error(Loc, "expected readable system register");
03556   case Match_MSR:
03557     return Error(Loc, "expected writable system register or pstate");
03558   case Match_MnemonicFail:
03559     return Error(Loc, "unrecognized instruction mnemonic");
03560   default:
03561     llvm_unreachable("unexpected error code!");
03562   }
03563 }
03564 
03565 static const char *getSubtargetFeatureName(uint64_t Val);
03566 
03567 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
03568                                                OperandVector &Operands,
03569                                                MCStreamer &Out,
03570                                                uint64_t &ErrorInfo,
03571                                                bool MatchingInlineAsm) {
03572   assert(!Operands.empty() && "Unexpect empty operand list!");
03573   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
03574   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
03575 
03576   StringRef Tok = Op.getToken();
03577   unsigned NumOperands = Operands.size();
03578 
03579   if (NumOperands == 4 && Tok == "lsl") {
03580     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
03581     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
03582     if (Op2.isReg() && Op3.isImm()) {
03583       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
03584       if (Op3CE) {
03585         uint64_t Op3Val = Op3CE->getValue();
03586         uint64_t NewOp3Val = 0;
03587         uint64_t NewOp4Val = 0;
03588         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
03589                 Op2.getReg())) {
03590           NewOp3Val = (32 - Op3Val) & 0x1f;
03591           NewOp4Val = 31 - Op3Val;
03592         } else {
03593           NewOp3Val = (64 - Op3Val) & 0x3f;
03594           NewOp4Val = 63 - Op3Val;
03595         }
03596 
03597         const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
03598         const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
03599 
03600         Operands[0] = AArch64Operand::CreateToken(
03601             "ubfm", false, Op.getStartLoc(), getContext());
03602         Operands.push_back(AArch64Operand::CreateImm(
03603             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
03604         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
03605                                                 Op3.getEndLoc(), getContext());
03606       }
03607     }
03608   } else if (NumOperands == 5) {
03609     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
03610     // UBFIZ -> UBFM aliases.
03611     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
03612       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
03613       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
03614       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
03615 
03616       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
03617         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
03618         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
03619 
03620         if (Op3CE && Op4CE) {
03621           uint64_t Op3Val = Op3CE->getValue();
03622           uint64_t Op4Val = Op4CE->getValue();
03623 
03624           uint64_t RegWidth = 0;
03625           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
03626                   Op1.getReg()))
03627             RegWidth = 64;
03628           else
03629             RegWidth = 32;
03630 
03631           if (Op3Val >= RegWidth)
03632             return Error(Op3.getStartLoc(),
03633                          "expected integer in range [0, 31]");
03634           if (Op4Val < 1 || Op4Val > RegWidth)
03635             return Error(Op4.getStartLoc(),
03636                          "expected integer in range [1, 32]");
03637 
03638           uint64_t NewOp3Val = 0;
03639           if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
03640                   Op1.getReg()))
03641             NewOp3Val = (32 - Op3Val) & 0x1f;
03642           else
03643             NewOp3Val = (64 - Op3Val) & 0x3f;
03644 
03645           uint64_t NewOp4Val = Op4Val - 1;
03646 
03647           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
03648             return Error(Op4.getStartLoc(),
03649                          "requested insert overflows register");
03650 
03651           const MCExpr *NewOp3 =
03652               MCConstantExpr::Create(NewOp3Val, getContext());
03653           const MCExpr *NewOp4 =
03654               MCConstantExpr::Create(NewOp4Val, getContext());
03655           Operands[3] = AArch64Operand::CreateImm(
03656               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
03657           Operands[4] = AArch64Operand::CreateImm(
03658               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
03659           if (Tok == "bfi")
03660             Operands[0] = AArch64Operand::CreateToken(
03661                 "bfm", false, Op.getStartLoc(), getContext());
03662           else if (Tok == "sbfiz")
03663             Operands[0] = AArch64Operand::CreateToken(
03664                 "sbfm", false, Op.getStartLoc(), getContext());
03665           else if (Tok == "ubfiz")
03666             Operands[0] = AArch64Operand::CreateToken(
03667                 "ubfm", false, Op.getStartLoc(), getContext());
03668           else
03669             llvm_unreachable("No valid mnemonic for alias?");
03670         }
03671       }
03672 
03673       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
03674       // UBFX -> UBFM aliases.
03675     } else if (NumOperands == 5 &&
03676                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
03677       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
03678       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
03679       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
03680 
03681       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
03682         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
03683         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
03684 
03685         if (Op3CE && Op4CE) {
03686           uint64_t Op3Val = Op3CE->getValue();
03687           uint64_t Op4Val = Op4CE->getValue();
03688 
03689           uint64_t RegWidth = 0;
03690           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
03691                   Op1.getReg()))
03692             RegWidth = 64;
03693           else
03694             RegWidth = 32;
03695 
03696           if (Op3Val >= RegWidth)
03697             return Error(Op3.getStartLoc(),
03698                          "expected integer in range [0, 31]");
03699           if (Op4Val < 1 || Op4Val > RegWidth)
03700             return Error(Op4.getStartLoc(),
03701                          "expected integer in range [1, 32]");
03702 
03703           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
03704 
03705           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
03706             return Error(Op4.getStartLoc(),
03707                          "requested extract overflows register");
03708 
03709           const MCExpr *NewOp4 =
03710               MCConstantExpr::Create(NewOp4Val, getContext());
03711           Operands[4] = AArch64Operand::CreateImm(
03712               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
03713           if (Tok == "bfxil")
03714             Operands[0] = AArch64Operand::CreateToken(
03715                 "bfm", false, Op.getStartLoc(), getContext());
03716           else if (Tok == "sbfx")
03717             Operands[0] = AArch64Operand::CreateToken(
03718                 "sbfm", false, Op.getStartLoc(), getContext());
03719           else if (Tok == "ubfx")
03720             Operands[0] = AArch64Operand::CreateToken(
03721                 "ubfm", false, Op.getStartLoc(), getContext());
03722           else
03723             llvm_unreachable("No valid mnemonic for alias?");
03724         }
03725       }
03726     }
03727   }
03728   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
03729   //        InstAlias can't quite handle this since the reg classes aren't
03730   //        subclasses.
03731   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
03732     // The source register can be Wn here, but the matcher expects a
03733     // GPR64. Twiddle it here if necessary.
03734     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
03735     if (Op.isReg()) {
03736       unsigned Reg = getXRegFromWReg(Op.getReg());
03737       Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
03738                                               Op.getEndLoc(), getContext());
03739     }
03740   }
03741   // FIXME: Likewise for sxt[bh] with a Xd dst operand
03742   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
03743     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
03744     if (Op.isReg() &&
03745         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
03746             Op.getReg())) {
03747       // The source register can be Wn here, but the matcher expects a
03748       // GPR64. Twiddle it here if necessary.
03749       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
03750       if (Op.isReg()) {
03751         unsigned Reg = getXRegFromWReg(Op.getReg());
03752         Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
03753                                                 Op.getEndLoc(), getContext());
03754       }
03755     }
03756   }
03757   // FIXME: Likewise for uxt[bh] with a Xd dst operand
03758   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
03759     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
03760     if (Op.isReg() &&
03761         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
03762             Op.getReg())) {
03763       // The source register can be Wn here, but the matcher expects a
03764       // GPR32. Twiddle it here if necessary.
03765       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
03766       if (Op.isReg()) {
03767         unsigned Reg = getWRegFromXReg(Op.getReg());
03768         Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
03769                                                 Op.getEndLoc(), getContext());
03770       }
03771     }
03772   }
03773 
03774   // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
03775   if (NumOperands == 3 && Tok == "fmov") {
03776     AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
03777     AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
03778     if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
03779       unsigned zreg =
03780           AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
03781               RegOp.getReg())
03782               ? AArch64::WZR
03783               : AArch64::XZR;
03784       Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
03785                                               Op.getEndLoc(), getContext());
03786     }
03787   }
03788 
03789   MCInst Inst;
03790   // First try to match against the secondary set of tables containing the
03791   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
03792   unsigned MatchResult =
03793       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
03794 
03795   // If that fails, try against the alternate table containing long-form NEON:
03796   // "fadd v0.2s, v1.2s, v2.2s"
03797   if (MatchResult != Match_Success)
03798     MatchResult =
03799         MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
03800 
03801   switch (MatchResult) {
03802   case Match_Success: {
03803     // Perform range checking and other semantic validations
03804     SmallVector<SMLoc, 8> OperandLocs;
03805     NumOperands = Operands.size();
03806     for (unsigned i = 1; i < NumOperands; ++i)
03807       OperandLocs.push_back(Operands[i]->getStartLoc());
03808     if (validateInstruction(Inst, OperandLocs))
03809       return true;
03810 
03811     Inst.setLoc(IDLoc);
03812     Out.EmitInstruction(Inst, STI);
03813     return false;
03814   }
03815   case Match_MissingFeature: {
03816     assert(ErrorInfo && "Unknown missing feature!");
03817     // Special case the error message for the very common case where only
03818     // a single subtarget feature is missing (neon, e.g.).
03819     std::string Msg = "instruction requires:";
03820     uint64_t Mask = 1;
03821     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
03822       if (ErrorInfo & Mask) {
03823         Msg += " ";
03824         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
03825       }
03826       Mask <<= 1;
03827     }
03828     return Error(IDLoc, Msg);
03829   }
03830   case Match_MnemonicFail:
03831     return showMatchError(IDLoc, MatchResult);
03832   case Match_InvalidOperand: {
03833     SMLoc ErrorLoc = IDLoc;
03834     if (ErrorInfo != ~0ULL) {
03835       if (ErrorInfo >= Operands.size())
03836         return Error(IDLoc, "too few operands for instruction");
03837 
03838       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
03839       if (ErrorLoc == SMLoc())
03840         ErrorLoc = IDLoc;
03841     }
03842     // If the match failed on a suffix token operand, tweak the diagnostic
03843     // accordingly.
03844     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
03845         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
03846       MatchResult = Match_InvalidSuffix;
03847 
03848     return showMatchError(ErrorLoc, MatchResult);
03849   }
03850   case Match_InvalidMemoryIndexed1:
03851   case Match_InvalidMemoryIndexed2:
03852   case Match_InvalidMemoryIndexed4:
03853   case Match_InvalidMemoryIndexed8:
03854   case Match_InvalidMemoryIndexed16:
03855   case Match_InvalidCondCode:
03856   case Match_AddSubRegExtendSmall:
03857   case Match_AddSubRegExtendLarge:
03858   case Match_AddSubSecondSource:
03859   case Match_LogicalSecondSource:
03860   case Match_AddSubRegShift32:
03861   case Match_AddSubRegShift64:
03862   case Match_InvalidMovImm32Shift:
03863   case Match_InvalidMovImm64Shift:
03864   case Match_InvalidFPImm:
03865   case Match_InvalidMemoryWExtend8:
03866   case Match_InvalidMemoryWExtend16:
03867   case Match_InvalidMemoryWExtend32:
03868   case Match_InvalidMemoryWExtend64:
03869   case Match_InvalidMemoryWExtend128:
03870   case Match_InvalidMemoryXExtend8:
03871   case Match_InvalidMemoryXExtend16:
03872   case Match_InvalidMemoryXExtend32:
03873   case Match_InvalidMemoryXExtend64:
03874   case Match_InvalidMemoryXExtend128:
03875   case Match_InvalidMemoryIndexed4SImm7:
03876   case Match_InvalidMemoryIndexed8SImm7:
03877   case Match_InvalidMemoryIndexed16SImm7:
03878   case Match_InvalidMemoryIndexedSImm9:
03879   case Match_InvalidImm0_7:
03880   case Match_InvalidImm0_15:
03881   case Match_InvalidImm0_31:
03882   case Match_InvalidImm0_63:
03883   case Match_InvalidImm0_127:
03884   case Match_InvalidImm0_65535:
03885   case Match_InvalidImm1_8:
03886   case Match_InvalidImm1_16:
03887   case Match_InvalidImm1_32:
03888   case Match_InvalidImm1_64:
03889   case Match_InvalidIndex1:
03890   case Match_InvalidIndexB:
03891   case Match_InvalidIndexH:
03892   case Match_InvalidIndexS:
03893   case Match_InvalidIndexD:
03894   case Match_InvalidLabel:
03895   case Match_MSR:
03896   case Match_MRS: {
03897     if (ErrorInfo >= Operands.size())
03898       return Error(IDLoc, "too few operands for instruction");
03899     // Any time we get here, there's nothing fancy to do. Just get the
03900     // operand SMLoc and display the diagnostic.
03901     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
03902     if (ErrorLoc == SMLoc())
03903       ErrorLoc = IDLoc;
03904     return showMatchError(ErrorLoc, MatchResult);
03905   }
03906   }
03907 
03908   llvm_unreachable("Implement any new match types added!");
03909   return true;
03910 }
03911 
03912 /// ParseDirective parses the arm specific directives
03913 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
03914   StringRef IDVal = DirectiveID.getIdentifier();
03915   SMLoc Loc = DirectiveID.getLoc();
03916   if (IDVal == ".hword")
03917     return parseDirectiveWord(2, Loc);
03918   if (IDVal == ".word")
03919     return parseDirectiveWord(4, Loc);
03920   if (IDVal == ".xword")
03921     return parseDirectiveWord(8, Loc);
03922   if (IDVal == ".tlsdesccall")
03923     return parseDirectiveTLSDescCall(Loc);
03924   if (IDVal == ".ltorg" || IDVal == ".pool")
03925     return parseDirectiveLtorg(Loc);
03926   if (IDVal == ".unreq")
03927     return parseDirectiveUnreq(DirectiveID.getLoc());
03928 
03929   return parseDirectiveLOH(IDVal, Loc);
03930 }
03931 
03932 /// parseDirectiveWord
03933 ///  ::= .word [ expression (, expression)* ]
03934 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
03935   if (getLexer().isNot(AsmToken::EndOfStatement)) {
03936     for (;;) {
03937       const MCExpr *Value;
03938       if (getParser().parseExpression(Value))
03939         return true;
03940 
03941       getParser().getStreamer().EmitValue(Value, Size);
03942 
03943       if (getLexer().is(AsmToken::EndOfStatement))
03944         break;
03945 
03946       // FIXME: Improve diagnostic.
03947       if (getLexer().isNot(AsmToken::Comma))
03948         return Error(L, "unexpected token in directive");
03949       Parser.Lex();
03950     }
03951   }
03952 
03953   Parser.Lex();
03954   return false;
03955 }
03956 
03957 // parseDirectiveTLSDescCall:
03958 //   ::= .tlsdesccall symbol
03959 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
03960   StringRef Name;
03961   if (getParser().parseIdentifier(Name))
03962     return Error(L, "expected symbol after directive");
03963 
03964   MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
03965   const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
03966   Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
03967 
03968   MCInst Inst;
03969   Inst.setOpcode(AArch64::TLSDESCCALL);
03970   Inst.addOperand(MCOperand::CreateExpr(Expr));
03971 
03972   getParser().getStreamer().EmitInstruction(Inst, STI);
03973   return false;
03974 }
03975 
03976 /// ::= .loh <lohName | lohId> label1, ..., labelN
03977 /// The number of arguments depends on the loh identifier.
03978 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
03979   if (IDVal != MCLOHDirectiveName())
03980     return true;
03981   MCLOHType Kind;
03982   if (getParser().getTok().isNot(AsmToken::Identifier)) {
03983     if (getParser().getTok().isNot(AsmToken::Integer))
03984       return TokError("expected an identifier or a number in directive");
03985     // We successfully get a numeric value for the identifier.
03986     // Check if it is valid.
03987     int64_t Id = getParser().getTok().getIntVal();
03988     if (Id <= -1U && !isValidMCLOHType(Id))
03989       return TokError("invalid numeric identifier in directive");
03990     Kind = (MCLOHType)Id;
03991   } else {
03992     StringRef Name = getTok().getIdentifier();
03993     // We successfully parse an identifier.
03994     // Check if it is a recognized one.
03995     int Id = MCLOHNameToId(Name);
03996 
03997     if (Id == -1)
03998       return TokError("invalid identifier in directive");
03999     Kind = (MCLOHType)Id;
04000   }
04001   // Consume the identifier.
04002   Lex();
04003   // Get the number of arguments of this LOH.
04004   int NbArgs = MCLOHIdToNbArgs(Kind);
04005 
04006   assert(NbArgs != -1 && "Invalid number of arguments");
04007 
04008   SmallVector<MCSymbol *, 3> Args;
04009   for (int Idx = 0; Idx < NbArgs; ++Idx) {
04010     StringRef Name;
04011     if (getParser().parseIdentifier(Name))
04012       return TokError("expected identifier in directive");
04013     Args.push_back(getContext().GetOrCreateSymbol(Name));
04014 
04015     if (Idx + 1 == NbArgs)
04016       break;
04017     if (getLexer().isNot(AsmToken::Comma))
04018       return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
04019     Lex();
04020   }
04021   if (getLexer().isNot(AsmToken::EndOfStatement))
04022     return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
04023 
04024   getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
04025   return false;
04026 }
04027 
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Both spellings are aliases: flush the pending constant pool into the
/// output stream at the current position via the target streamer.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
  return false; // Directive handled; any diagnostics were already emitted.
}
04034 
04035 /// parseDirectiveReq
04036 ///  ::= name .req registername
04037 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
04038   Parser.Lex(); // Eat the '.req' token.
04039   SMLoc SRegLoc = getLoc();
04040   unsigned RegNum = tryParseRegister();
04041   bool IsVector = false;
04042 
04043   if (RegNum == static_cast<unsigned>(-1)) {
04044     StringRef Kind;
04045     RegNum = tryMatchVectorRegister(Kind, false);
04046     if (!Kind.empty()) {
04047       Error(SRegLoc, "vector register without type specifier expected");
04048       return false;
04049     }
04050     IsVector = true;
04051   }
04052 
04053   if (RegNum == static_cast<unsigned>(-1)) {
04054     Parser.eatToEndOfStatement();
04055     Error(SRegLoc, "register name or alias expected");
04056     return false;
04057   }
04058 
04059   // Shouldn't be anything else.
04060   if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
04061     Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
04062     Parser.eatToEndOfStatement();
04063     return false;
04064   }
04065 
04066   Parser.Lex(); // Consume the EndOfStatement
04067 
04068   auto pair = std::make_pair(IsVector, RegNum);
04069   if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
04070     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
04071 
04072   return true;
04073 }
04074 
04075 /// parseDirectiveUneq
04076 ///  ::= .unreq registername
04077 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
04078   if (Parser.getTok().isNot(AsmToken::Identifier)) {
04079     Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
04080     Parser.eatToEndOfStatement();
04081     return false;
04082   }
04083   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
04084   Parser.Lex(); // Eat the identifier.
04085   return false;
04086 }
04087 
04088 bool
04089 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
04090                                     AArch64MCExpr::VariantKind &ELFRefKind,
04091                                     MCSymbolRefExpr::VariantKind &DarwinRefKind,
04092                                     int64_t &Addend) {
04093   ELFRefKind = AArch64MCExpr::VK_INVALID;
04094   DarwinRefKind = MCSymbolRefExpr::VK_None;
04095   Addend = 0;
04096 
04097   if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
04098     ELFRefKind = AE->getKind();
04099     Expr = AE->getSubExpr();
04100   }
04101 
04102   const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
04103   if (SE) {
04104     // It's a simple symbol reference with no addend.
04105     DarwinRefKind = SE->getKind();
04106     return true;
04107   }
04108 
04109   const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
04110   if (!BE)
04111     return false;
04112 
04113   SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
04114   if (!SE)
04115     return false;
04116   DarwinRefKind = SE->getKind();
04117 
04118   if (BE->getOpcode() != MCBinaryExpr::Add &&
04119       BE->getOpcode() != MCBinaryExpr::Sub)
04120     return false;
04121 
04122   // See if the addend is is a constant, otherwise there's more going
04123   // on here than we can deal with.
04124   auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
04125   if (!AddendExpr)
04126     return false;
04127 
04128   Addend = AddendExpr->getValue();
04129   if (BE->getOpcode() == MCBinaryExpr::Sub)
04130     Addend = -Addend;
04131 
04132   // It's some symbol reference + a constant addend, but really
04133   // shouldn't use both Darwin and ELF syntax.
04134   return ELFRefKind == AArch64MCExpr::VK_INVALID ||
04135          DarwinRefKind == MCSymbolRefExpr::VK_None;
04136 }
04137 
04138 /// Force static initialization.
04139 extern "C" void LLVMInitializeAArch64AsmParser() {
04140   RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
04141   RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
04142   RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
04143 }
04144 
04145 #define GET_REGISTER_MATCHER
04146 #define GET_SUBTARGET_FEATURE_NAME
04147 #define GET_MATCHER_IMPLEMENTATION
04148 #include "AArch64GenAsmMatcher.inc"
04149 
04150 // Define this matcher function after the auto-generated include so we
04151 // have the match class enum definitions.
04152 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
04153                                                       unsigned Kind) {
04154   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
04155   // If the kind is a token for a literal immediate, check if our asm
04156   // operand matches. This is for InstAliases which have a fixed-value
04157   // immediate in the syntax.
04158   int64_t ExpectedVal;
04159   switch (Kind) {
04160   default:
04161     return Match_InvalidOperand;
04162   case MCK__35_0:
04163     ExpectedVal = 0;
04164     break;
04165   case MCK__35_1:
04166     ExpectedVal = 1;
04167     break;
04168   case MCK__35_12:
04169     ExpectedVal = 12;
04170     break;
04171   case MCK__35_16:
04172     ExpectedVal = 16;
04173     break;
04174   case MCK__35_2:
04175     ExpectedVal = 2;
04176     break;
04177   case MCK__35_24:
04178     ExpectedVal = 24;
04179     break;
04180   case MCK__35_3:
04181     ExpectedVal = 3;
04182     break;
04183   case MCK__35_32:
04184     ExpectedVal = 32;
04185     break;
04186   case MCK__35_4:
04187     ExpectedVal = 4;
04188     break;
04189   case MCK__35_48:
04190     ExpectedVal = 48;
04191     break;
04192   case MCK__35_6:
04193     ExpectedVal = 6;
04194     break;
04195   case MCK__35_64:
04196     ExpectedVal = 64;
04197     break;
04198   case MCK__35_8:
04199     ExpectedVal = 8;
04200     break;
04201   }
04202   if (!Op.isImm())
04203     return Match_InvalidOperand;
04204   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
04205   if (!CE)
04206     return Match_InvalidOperand;
04207   if (CE->getValue() == ExpectedVal)
04208     return Match_Success;
04209   return Match_InvalidOperand;
04210 }