LLVM API Documentation
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMFPUName.h"
#include "ARMFeatures.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMArchName.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class ARMOperand;

enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

class UnwindContext {
  MCAsmParser &Parser;

  typedef SmallVector<SMLoc, 4> Locs;

  Locs FnStartLocs;
  Locs CantUnwindLocs;
  Locs PersonalityLocs;
  Locs PersonalityIndexLocs;
  Locs HandlerDataLocs;
  int FPReg;

public:
  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}

  bool hasFnStart() const { return !FnStartLocs.empty(); }
  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
  bool hasPersonality() const {
    return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
  }

  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }

  void saveFPReg(int Reg) { FPReg = Reg; }
  int getFPReg() const { return FPReg; }

  void emitFnStartLocNotes() const {
    for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
         FI != FE; ++FI)
      Parser.Note(*FI, ".fnstart was specified here");
  }
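  // Illustrative note (not part of the original source): the locations
  // recorded above feed the unwind-directive diagnostics. For example, a
  // second .fnstart seen before the matching .fnend is diagnosed, and
  // emitFnStartLocNotes() above attaches the ".fnstart was specified here"
  // note to the location of the earlier directive:
  //
  //     .fnstart
  //     .fnstart    @ error; the note points back at the first .fnstart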
  void emitCantUnwindLocNotes() const {
    for (Locs::const_iterator UI = CantUnwindLocs.begin(),
                              UE = CantUnwindLocs.end(); UI != UE; ++UI)
      Parser.Note(*UI, ".cantunwind was specified here");
  }
  void emitHandlerDataLocNotes() const {
    for (Locs::const_iterator HI = HandlerDataLocs.begin(),
                              HE = HandlerDataLocs.end(); HI != HE; ++HI)
      Parser.Note(*HI, ".handlerdata was specified here");
  }
  void emitPersonalityLocNotes() const {
    for (Locs::const_iterator PI = PersonalityLocs.begin(),
                              PE = PersonalityLocs.end(),
                              PII = PersonalityIndexLocs.begin(),
                              PIE = PersonalityIndexLocs.end();
         PI != PE || PII != PIE;) {
      if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
        Parser.Note(*PI++, ".personality was specified here");
      else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
        Parser.Note(*PII++, ".personalityindex was specified here");
      else
        llvm_unreachable(".personality and .personalityindex cannot be "
                         "at the same location");
    }
  }

  void reset() {
    FnStartLocs = Locs();
    CantUnwindLocs = Locs();
    PersonalityLocs = Locs();
    HandlerDataLocs = Locs();
    PersonalityIndexLocs = Locs();
    FPReg = ARM::SP;
  }
};

class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  const MCInstrInfo &MII;
  const MCRegisterInfo *MRI;
  UnwindContext UC;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  bool NextSymbolIsThumb;

  struct {
    ARMCC::CondCodes Cond;  // Condition for IT block.
    unsigned Mask:4;        // Condition mask for instructions.
                            // Starting at first 1 (from lsb).
                            //   '1'  condition as indicated in IT.
                            //   '0'  inverse of condition (else).
                            // Count of instructions in IT block is
                            // 4 - trailingzeroes(mask)

    bool FirstCond;         // Explicit flag for when we're parsing the
                            // first instruction in the IT block. It's
                            // implied in the mask, so needs special
                            // handling.

    unsigned CurPosition;   // Current position in parsing of IT
                            // block. In range [0,3]. Initialized
                            // according to count of instructions in block.
                            // ~0U if no active IT block.
  } ITState;
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }
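  // Worked example (not part of the original source, assuming the mask
  // convention described above): "itte eq" is recorded with Mask == 0b1010,
  // i.e. 't'/'e' bits for the second and third instructions followed by the
  // terminating 1, so the block covers 4 - countTrailingZeros(0b1010) == 3
  // conditional instructions. CurPosition is set to 0 when the IT
  // instruction itself is processed, forwardITPosition() advances it after
  // every matched instruction, and once ++CurPosition reaches 5 - TZ
  // (4 in this example) the block is closed by resetting CurPosition to ~0U.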

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
    return Parser.Note(L, Msg, Ranges);
  }
  bool Warning(SMLoc L, const Twine &Msg,
               ArrayRef<SMRange> Ranges = None) {
    return Parser.Warning(L, Msg, Ranges);
  }
  bool Error(SMLoc L, const Twine &Msg,
             ArrayRef<SMRange> Ranges = None) {
    return Parser.Error(L, Msg, Ranges);
  }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
                             bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);
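  // Illustrative example (not part of the original source): splitMnemonic()
  // peels suffixes off a full mnemonic, so "addseq" would come back as the
  // base mnemonic "add" with CarrySetting == true and PredicationCode ==
  // ARMCC::EQ, and getMnemonicAcceptInfo() then reports whether that base
  // mnemonic is allowed to take an 's' suffix and/or a condition code at all.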

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasThumb() const {
    return STI.getFeatureBits() & ARM::HasV4TOps;
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV6MOps() const {
    return STI.getFeatureBits() & ARM::HasV6MOps;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  bool hasV8Ops() const {
    return STI.getFeatureBits() & ARM::HasV8Ops;
  }
  bool hasARM() const {
    return !(STI.getFeatureBits() & ARM::FeatureNoARM);
  }
  bool hasThumb2DSP() const {
    return STI.getFeatureBits() & ARM::FeatureDSPThumb2;
  }

  void SwitchMode() {
    uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  OperandMatchResultTy parseITCondCode(OperandVector &);
  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
                                   int High);
  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(OperandVector &);
  OperandMatchResultTy parseShifterImm(OperandVector &);
  OperandMatchResultTy parseRotImm(OperandVector &);
  OperandMatchResultTy parseBitfield(OperandVector &);
  OperandMatchResultTy parsePostIdxReg(OperandVector &);
  OperandMatchResultTy parseAM3Offset(OperandVector &);
  OperandMatchResultTy parseFPImm(OperandVector &);
  OperandMatchResultTy parseVectorList(OperandVector &);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
               const MCInstrInfo &MII,
               const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), UC(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  void onLabelParsed(MCSymbol *Symbol) override;
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc, AlignmentLoc;
  SmallVector<unsigned, 8> Registers;

  struct CCOp {
    ARMCC::CondCodes Val;
  };

  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  struct ISBOptOp {
    ARM_ISB::InstSyncBOpt Val;
  };

  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

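  // Illustrative mapping (not part of the original source): an operand
  // written as "[r3, r2, lsl #2]" ends up in the MemoryOp record below with
  // BaseRegNum == ARM::R3, OffsetRegNum == ARM::R2, ShiftType == ARM_AM::lsl
  // and ShiftImm == 2, while a plain "#42" becomes an ImmOp holding an
  // MCConstantExpr.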
  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCConstantExpr *OffsetImm; // Offset immediate value
    unsigned OffsetRegNum;           // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType;      // Shift type for OffsetReg
    unsigned ShiftImm;               // shift for OffsetReg.
    unsigned Alignment;              // 0 = no alignment specified
                                     // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;         // Negated OffsetReg? (~'U' bit)
  };

  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  union {
    struct CCOp CC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct BitfieldOp Bitfield;
  };

public:
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_InstSyncBarrierOpt:
      ISBOpt = o.ISBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_BankedReg:
      BankedReg = o.BankedReg;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

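  // Note (not part of the original source): only the union member selected
  // by Kind is meaningful, and the accessors below assert on mismatched
  // access; e.g. calling getReg() on an operand constructed as an immediate
  // trips the "Invalid access!" assertion instead of returning stale data.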
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }
  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override { return Kind == k_Immediate; }
  // Checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }
  // Checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }
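  // Worked example (not part of the original source): for a hypothetical
  // instantiation isUnsignedOffset<8, 2>(), Align is 1 << 2 == 4 and Max is
  // 4 * ((1 << 8) - 1) == 1020, so exactly the multiples of 4 in [0, 1020]
  // are accepted; isSignedOffset<8, 2>() instead accepts multiples of 4 in
  // [-512, 508]. A bare symbol reference is accepted as-is and left to a
  // later fixup.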

  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or
  // a relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isMem()) {
      if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if (Memory.BaseRegNum != ARM::PC) return false;
      Val = Memory.OffsetImm->getValue();
    }
    else return false;
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
  bool isFBits16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 16;
  }
  bool isFBits32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }
  bool isImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
int64_t Value = -CE->getValue(); 00807 // explicitly exclude zero. we want that to use the normal 0_508 version. 00808 return ((Value & 3) == 0) && Value > 0 && Value <= 508; 00809 } 00810 bool isImm0_239() const { 00811 if (!isImm()) return false; 00812 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00813 if (!CE) return false; 00814 int64_t Value = CE->getValue(); 00815 return Value >= 0 && Value < 240; 00816 } 00817 bool isImm0_255() const { 00818 if (!isImm()) return false; 00819 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00820 if (!CE) return false; 00821 int64_t Value = CE->getValue(); 00822 return Value >= 0 && Value < 256; 00823 } 00824 bool isImm0_4095() const { 00825 if (!isImm()) return false; 00826 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00827 if (!CE) return false; 00828 int64_t Value = CE->getValue(); 00829 return Value >= 0 && Value < 4096; 00830 } 00831 bool isImm0_4095Neg() const { 00832 if (!isImm()) return false; 00833 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00834 if (!CE) return false; 00835 int64_t Value = -CE->getValue(); 00836 return Value > 0 && Value < 4096; 00837 } 00838 bool isImm0_1() const { 00839 if (!isImm()) return false; 00840 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00841 if (!CE) return false; 00842 int64_t Value = CE->getValue(); 00843 return Value >= 0 && Value < 2; 00844 } 00845 bool isImm0_3() const { 00846 if (!isImm()) return false; 00847 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00848 if (!CE) return false; 00849 int64_t Value = CE->getValue(); 00850 return Value >= 0 && Value < 4; 00851 } 00852 bool isImm0_7() const { 00853 if (!isImm()) return false; 00854 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00855 if (!CE) return false; 00856 int64_t Value = CE->getValue(); 00857 return Value >= 0 && Value < 8; 00858 } 00859 bool isImm0_15() const { 00860 if (!isImm()) return false; 00861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00862 if (!CE) return false; 00863 int64_t Value = CE->getValue(); 00864 return Value >= 0 && Value < 16; 00865 } 00866 bool isImm0_31() const { 00867 if (!isImm()) return false; 00868 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00869 if (!CE) return false; 00870 int64_t Value = CE->getValue(); 00871 return Value >= 0 && Value < 32; 00872 } 00873 bool isImm0_63() const { 00874 if (!isImm()) return false; 00875 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00876 if (!CE) return false; 00877 int64_t Value = CE->getValue(); 00878 return Value >= 0 && Value < 64; 00879 } 00880 bool isImm8() const { 00881 if (!isImm()) return false; 00882 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00883 if (!CE) return false; 00884 int64_t Value = CE->getValue(); 00885 return Value == 8; 00886 } 00887 bool isImm16() const { 00888 if (!isImm()) return false; 00889 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00890 if (!CE) return false; 00891 int64_t Value = CE->getValue(); 00892 return Value == 16; 00893 } 00894 bool isImm32() const { 00895 if (!isImm()) return false; 00896 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00897 if (!CE) return false; 00898 int64_t Value = CE->getValue(); 00899 return Value == 32; 00900 } 00901 bool isShrImm8() const { 00902 if (!isImm()) return false; 00903 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00904 if (!CE) return false; 00905 int64_t Value = 
CE->getValue(); 00906 return Value > 0 && Value <= 8; 00907 } 00908 bool isShrImm16() const { 00909 if (!isImm()) return false; 00910 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00911 if (!CE) return false; 00912 int64_t Value = CE->getValue(); 00913 return Value > 0 && Value <= 16; 00914 } 00915 bool isShrImm32() const { 00916 if (!isImm()) return false; 00917 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00918 if (!CE) return false; 00919 int64_t Value = CE->getValue(); 00920 return Value > 0 && Value <= 32; 00921 } 00922 bool isShrImm64() const { 00923 if (!isImm()) return false; 00924 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00925 if (!CE) return false; 00926 int64_t Value = CE->getValue(); 00927 return Value > 0 && Value <= 64; 00928 } 00929 bool isImm1_7() const { 00930 if (!isImm()) return false; 00931 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00932 if (!CE) return false; 00933 int64_t Value = CE->getValue(); 00934 return Value > 0 && Value < 8; 00935 } 00936 bool isImm1_15() const { 00937 if (!isImm()) return false; 00938 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00939 if (!CE) return false; 00940 int64_t Value = CE->getValue(); 00941 return Value > 0 && Value < 16; 00942 } 00943 bool isImm1_31() const { 00944 if (!isImm()) return false; 00945 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00946 if (!CE) return false; 00947 int64_t Value = CE->getValue(); 00948 return Value > 0 && Value < 32; 00949 } 00950 bool isImm1_16() const { 00951 if (!isImm()) return false; 00952 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00953 if (!CE) return false; 00954 int64_t Value = CE->getValue(); 00955 return Value > 0 && Value < 17; 00956 } 00957 bool isImm1_32() const { 00958 if (!isImm()) return false; 00959 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00960 if (!CE) return false; 00961 int64_t Value = CE->getValue(); 00962 return Value > 0 && Value < 33; 00963 } 00964 bool isImm0_32() const { 00965 if (!isImm()) return false; 00966 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00967 if (!CE) return false; 00968 int64_t Value = CE->getValue(); 00969 return Value >= 0 && Value < 33; 00970 } 00971 bool isImm0_65535() const { 00972 if (!isImm()) return false; 00973 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00974 if (!CE) return false; 00975 int64_t Value = CE->getValue(); 00976 return Value >= 0 && Value < 65536; 00977 } 00978 bool isImm256_65535Expr() const { 00979 if (!isImm()) return false; 00980 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00981 // If it's not a constant expression, it'll generate a fixup and be 00982 // handled later. 00983 if (!CE) return true; 00984 int64_t Value = CE->getValue(); 00985 return Value >= 256 && Value < 65536; 00986 } 00987 bool isImm0_65535Expr() const { 00988 if (!isImm()) return false; 00989 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00990 // If it's not a constant expression, it'll generate a fixup and be 00991 // handled later. 
00992 if (!CE) return true; 00993 int64_t Value = CE->getValue(); 00994 return Value >= 0 && Value < 65536; 00995 } 00996 bool isImm24bit() const { 00997 if (!isImm()) return false; 00998 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 00999 if (!CE) return false; 01000 int64_t Value = CE->getValue(); 01001 return Value >= 0 && Value <= 0xffffff; 01002 } 01003 bool isImmThumbSR() const { 01004 if (!isImm()) return false; 01005 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01006 if (!CE) return false; 01007 int64_t Value = CE->getValue(); 01008 return Value > 0 && Value < 33; 01009 } 01010 bool isPKHLSLImm() const { 01011 if (!isImm()) return false; 01012 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01013 if (!CE) return false; 01014 int64_t Value = CE->getValue(); 01015 return Value >= 0 && Value < 32; 01016 } 01017 bool isPKHASRImm() const { 01018 if (!isImm()) return false; 01019 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01020 if (!CE) return false; 01021 int64_t Value = CE->getValue(); 01022 return Value > 0 && Value <= 32; 01023 } 01024 bool isAdrLabel() const { 01025 // If we have an immediate that's not a constant, treat it as a label 01026 // reference needing a fixup. If it is a constant, but it can't fit 01027 // into shift immediate encoding, we reject it. 01028 if (isImm() && !isa<MCConstantExpr>(getImm())) return true; 01029 else return (isARMSOImm() || isARMSOImmNeg()); 01030 } 01031 bool isARMSOImm() const { 01032 if (!isImm()) return false; 01033 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01034 if (!CE) return false; 01035 int64_t Value = CE->getValue(); 01036 return ARM_AM::getSOImmVal(Value) != -1; 01037 } 01038 bool isARMSOImmNot() const { 01039 if (!isImm()) return false; 01040 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01041 if (!CE) return false; 01042 int64_t Value = CE->getValue(); 01043 return ARM_AM::getSOImmVal(~Value) != -1; 01044 } 01045 bool isARMSOImmNeg() const { 01046 if (!isImm()) return false; 01047 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01048 if (!CE) return false; 01049 int64_t Value = CE->getValue(); 01050 // Only use this when not representable as a plain so_imm. 01051 return ARM_AM::getSOImmVal(Value) == -1 && 01052 ARM_AM::getSOImmVal(-Value) != -1; 01053 } 01054 bool isT2SOImm() const { 01055 if (!isImm()) return false; 01056 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01057 if (!CE) return false; 01058 int64_t Value = CE->getValue(); 01059 return ARM_AM::getT2SOImmVal(Value) != -1; 01060 } 01061 bool isT2SOImmNot() const { 01062 if (!isImm()) return false; 01063 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01064 if (!CE) return false; 01065 int64_t Value = CE->getValue(); 01066 return ARM_AM::getT2SOImmVal(Value) == -1 && 01067 ARM_AM::getT2SOImmVal(~Value) != -1; 01068 } 01069 bool isT2SOImmNeg() const { 01070 if (!isImm()) return false; 01071 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01072 if (!CE) return false; 01073 int64_t Value = CE->getValue(); 01074 // Only use this when not representable as a plain so_imm. 
01075 return ARM_AM::getT2SOImmVal(Value) == -1 && 01076 ARM_AM::getT2SOImmVal(-Value) != -1; 01077 } 01078 bool isSetEndImm() const { 01079 if (!isImm()) return false; 01080 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01081 if (!CE) return false; 01082 int64_t Value = CE->getValue(); 01083 return Value == 1 || Value == 0; 01084 } 01085 bool isReg() const override { return Kind == k_Register; } 01086 bool isRegList() const { return Kind == k_RegisterList; } 01087 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 01088 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 01089 bool isToken() const override { return Kind == k_Token; } 01090 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 01091 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; } 01092 bool isMem() const override { return Kind == k_Memory; } 01093 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 01094 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 01095 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 01096 bool isRotImm() const { return Kind == k_RotateImmediate; } 01097 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 01098 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 01099 bool isPostIdxReg() const { 01100 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 01101 } 01102 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const { 01103 if (!isMem()) 01104 return false; 01105 // No offset of any kind. 01106 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr && 01107 (alignOK || Memory.Alignment == Alignment); 01108 } 01109 bool isMemPCRelImm12() const { 01110 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01111 return false; 01112 // Base register must be PC. 01113 if (Memory.BaseRegNum != ARM::PC) 01114 return false; 01115 // Immediate offset in range [-4095, 4095]. 01116 if (!Memory.OffsetImm) return true; 01117 int64_t Val = Memory.OffsetImm->getValue(); 01118 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 01119 } 01120 bool isAlignedMemory() const { 01121 return isMemNoOffset(true); 01122 } 01123 bool isAlignedMemoryNone() const { 01124 return isMemNoOffset(false, 0); 01125 } 01126 bool isDupAlignedMemoryNone() const { 01127 return isMemNoOffset(false, 0); 01128 } 01129 bool isAlignedMemory16() const { 01130 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2. 01131 return true; 01132 return isMemNoOffset(false, 0); 01133 } 01134 bool isDupAlignedMemory16() const { 01135 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2. 01136 return true; 01137 return isMemNoOffset(false, 0); 01138 } 01139 bool isAlignedMemory32() const { 01140 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4. 01141 return true; 01142 return isMemNoOffset(false, 0); 01143 } 01144 bool isDupAlignedMemory32() const { 01145 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4. 01146 return true; 01147 return isMemNoOffset(false, 0); 01148 } 01149 bool isAlignedMemory64() const { 01150 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. 01151 return true; 01152 return isMemNoOffset(false, 0); 01153 } 01154 bool isDupAlignedMemory64() const { 01155 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. 
01156 return true; 01157 return isMemNoOffset(false, 0); 01158 } 01159 bool isAlignedMemory64or128() const { 01160 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. 01161 return true; 01162 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. 01163 return true; 01164 return isMemNoOffset(false, 0); 01165 } 01166 bool isDupAlignedMemory64or128() const { 01167 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. 01168 return true; 01169 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. 01170 return true; 01171 return isMemNoOffset(false, 0); 01172 } 01173 bool isAlignedMemory64or128or256() const { 01174 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. 01175 return true; 01176 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. 01177 return true; 01178 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32. 01179 return true; 01180 return isMemNoOffset(false, 0); 01181 } 01182 bool isAddrMode2() const { 01183 if (!isMem() || Memory.Alignment != 0) return false; 01184 // Check for register offset. 01185 if (Memory.OffsetRegNum) return true; 01186 // Immediate offset in range [-4095, 4095]. 01187 if (!Memory.OffsetImm) return true; 01188 int64_t Val = Memory.OffsetImm->getValue(); 01189 return Val > -4096 && Val < 4096; 01190 } 01191 bool isAM2OffsetImm() const { 01192 if (!isImm()) return false; 01193 // Immediate offset in range [-4095, 4095]. 01194 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01195 if (!CE) return false; 01196 int64_t Val = CE->getValue(); 01197 return (Val == INT32_MIN) || (Val > -4096 && Val < 4096); 01198 } 01199 bool isAddrMode3() const { 01200 // If we have an immediate that's not a constant, treat it as a label 01201 // reference needing a fixup. If it is a constant, it's something else 01202 // and we reject it. 01203 if (isImm() && !isa<MCConstantExpr>(getImm())) 01204 return true; 01205 if (!isMem() || Memory.Alignment != 0) return false; 01206 // No shifts are legal for AM3. 01207 if (Memory.ShiftType != ARM_AM::no_shift) return false; 01208 // Check for register offset. 01209 if (Memory.OffsetRegNum) return true; 01210 // Immediate offset in range [-255, 255]. 01211 if (!Memory.OffsetImm) return true; 01212 int64_t Val = Memory.OffsetImm->getValue(); 01213 // The #-0 offset is encoded as INT32_MIN, and we have to check 01214 // for this too. 01215 return (Val > -256 && Val < 256) || Val == INT32_MIN; 01216 } 01217 bool isAM3Offset() const { 01218 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 01219 return false; 01220 if (Kind == k_PostIndexRegister) 01221 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 01222 // Immediate offset in range [-255, 255]. 01223 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01224 if (!CE) return false; 01225 int64_t Val = CE->getValue(); 01226 // Special case, #-0 is INT32_MIN. 01227 return (Val > -256 && Val < 256) || Val == INT32_MIN; 01228 } 01229 bool isAddrMode5() const { 01230 // If we have an immediate that's not a constant, treat it as a label 01231 // reference needing a fixup. If it is a constant, it's something else 01232 // and we reject it. 01233 if (isImm() && !isa<MCConstantExpr>(getImm())) 01234 return true; 01235 if (!isMem() || Memory.Alignment != 0) return false; 01236 // Check for register offset. 01237 if (Memory.OffsetRegNum) return false; 01238 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
01239 if (!Memory.OffsetImm) return true; 01240 int64_t Val = Memory.OffsetImm->getValue(); 01241 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 01242 Val == INT32_MIN; 01243 } 01244 bool isMemTBB() const { 01245 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || 01246 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 01247 return false; 01248 return true; 01249 } 01250 bool isMemTBH() const { 01251 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || 01252 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 01253 Memory.Alignment != 0 ) 01254 return false; 01255 return true; 01256 } 01257 bool isMemRegOffset() const { 01258 if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0) 01259 return false; 01260 return true; 01261 } 01262 bool isT2MemRegOffset() const { 01263 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || 01264 Memory.Alignment != 0) 01265 return false; 01266 // Only lsl #{0, 1, 2, 3} allowed. 01267 if (Memory.ShiftType == ARM_AM::no_shift) 01268 return true; 01269 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 01270 return false; 01271 return true; 01272 } 01273 bool isMemThumbRR() const { 01274 // Thumb reg+reg addressing is simple. Just two registers, a base and 01275 // an offset. No shifts, negations or any other complicating factors. 01276 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || 01277 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 01278 return false; 01279 return isARMLowRegister(Memory.BaseRegNum) && 01280 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 01281 } 01282 bool isMemThumbRIs4() const { 01283 if (!isMem() || Memory.OffsetRegNum != 0 || 01284 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 01285 return false; 01286 // Immediate offset, multiple of 4 in range [0, 124]. 01287 if (!Memory.OffsetImm) return true; 01288 int64_t Val = Memory.OffsetImm->getValue(); 01289 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 01290 } 01291 bool isMemThumbRIs2() const { 01292 if (!isMem() || Memory.OffsetRegNum != 0 || 01293 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 01294 return false; 01295 // Immediate offset, multiple of 4 in range [0, 62]. 01296 if (!Memory.OffsetImm) return true; 01297 int64_t Val = Memory.OffsetImm->getValue(); 01298 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 01299 } 01300 bool isMemThumbRIs1() const { 01301 if (!isMem() || Memory.OffsetRegNum != 0 || 01302 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 01303 return false; 01304 // Immediate offset in range [0, 31]. 01305 if (!Memory.OffsetImm) return true; 01306 int64_t Val = Memory.OffsetImm->getValue(); 01307 return Val >= 0 && Val <= 31; 01308 } 01309 bool isMemThumbSPI() const { 01310 if (!isMem() || Memory.OffsetRegNum != 0 || 01311 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 01312 return false; 01313 // Immediate offset, multiple of 4 in range [0, 1020]. 01314 if (!Memory.OffsetImm) return true; 01315 int64_t Val = Memory.OffsetImm->getValue(); 01316 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 01317 } 01318 bool isMemImm8s4Offset() const { 01319 // If we have an immediate that's not a constant, treat it as a label 01320 // reference needing a fixup. If it is a constant, it's something else 01321 // and we reject it. 
01322 if (isImm() && !isa<MCConstantExpr>(getImm())) 01323 return true; 01324 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01325 return false; 01326 // Immediate offset a multiple of 4 in range [-1020, 1020]. 01327 if (!Memory.OffsetImm) return true; 01328 int64_t Val = Memory.OffsetImm->getValue(); 01329 // Special case, #-0 is INT32_MIN. 01330 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN; 01331 } 01332 bool isMemImm0_1020s4Offset() const { 01333 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01334 return false; 01335 // Immediate offset a multiple of 4 in range [0, 1020]. 01336 if (!Memory.OffsetImm) return true; 01337 int64_t Val = Memory.OffsetImm->getValue(); 01338 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 01339 } 01340 bool isMemImm8Offset() const { 01341 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01342 return false; 01343 // Base reg of PC isn't allowed for these encodings. 01344 if (Memory.BaseRegNum == ARM::PC) return false; 01345 // Immediate offset in range [-255, 255]. 01346 if (!Memory.OffsetImm) return true; 01347 int64_t Val = Memory.OffsetImm->getValue(); 01348 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 01349 } 01350 bool isMemPosImm8Offset() const { 01351 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01352 return false; 01353 // Immediate offset in range [0, 255]. 01354 if (!Memory.OffsetImm) return true; 01355 int64_t Val = Memory.OffsetImm->getValue(); 01356 return Val >= 0 && Val < 256; 01357 } 01358 bool isMemNegImm8Offset() const { 01359 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01360 return false; 01361 // Base reg of PC isn't allowed for these encodings. 01362 if (Memory.BaseRegNum == ARM::PC) return false; 01363 // Immediate offset in range [-255, -1]. 01364 if (!Memory.OffsetImm) return false; 01365 int64_t Val = Memory.OffsetImm->getValue(); 01366 return (Val == INT32_MIN) || (Val > -256 && Val < 0); 01367 } 01368 bool isMemUImm12Offset() const { 01369 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01370 return false; 01371 // Immediate offset in range [0, 4095]. 01372 if (!Memory.OffsetImm) return true; 01373 int64_t Val = Memory.OffsetImm->getValue(); 01374 return (Val >= 0 && Val < 4096); 01375 } 01376 bool isMemImm12Offset() const { 01377 // If we have an immediate that's not a constant, treat it as a label 01378 // reference needing a fixup. If it is a constant, it's something else 01379 // and we reject it. 01380 if (isImm() && !isa<MCConstantExpr>(getImm())) 01381 return true; 01382 01383 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 01384 return false; 01385 // Immediate offset in range [-4095, 4095]. 
01386 if (!Memory.OffsetImm) return true; 01387 int64_t Val = Memory.OffsetImm->getValue(); 01388 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 01389 } 01390 bool isPostIdxImm8() const { 01391 if (!isImm()) return false; 01392 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01393 if (!CE) return false; 01394 int64_t Val = CE->getValue(); 01395 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 01396 } 01397 bool isPostIdxImm8s4() const { 01398 if (!isImm()) return false; 01399 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01400 if (!CE) return false; 01401 int64_t Val = CE->getValue(); 01402 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 01403 (Val == INT32_MIN); 01404 } 01405 01406 bool isMSRMask() const { return Kind == k_MSRMask; } 01407 bool isBankedReg() const { return Kind == k_BankedReg; } 01408 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 01409 01410 // NEON operands. 01411 bool isSingleSpacedVectorList() const { 01412 return Kind == k_VectorList && !VectorList.isDoubleSpaced; 01413 } 01414 bool isDoubleSpacedVectorList() const { 01415 return Kind == k_VectorList && VectorList.isDoubleSpaced; 01416 } 01417 bool isVecListOneD() const { 01418 if (!isSingleSpacedVectorList()) return false; 01419 return VectorList.Count == 1; 01420 } 01421 01422 bool isVecListDPair() const { 01423 if (!isSingleSpacedVectorList()) return false; 01424 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 01425 .contains(VectorList.RegNum)); 01426 } 01427 01428 bool isVecListThreeD() const { 01429 if (!isSingleSpacedVectorList()) return false; 01430 return VectorList.Count == 3; 01431 } 01432 01433 bool isVecListFourD() const { 01434 if (!isSingleSpacedVectorList()) return false; 01435 return VectorList.Count == 4; 01436 } 01437 01438 bool isVecListDPairSpaced() const { 01439 if (Kind != k_VectorList) return false; 01440 if (isSingleSpacedVectorList()) return false; 01441 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID] 01442 .contains(VectorList.RegNum)); 01443 } 01444 01445 bool isVecListThreeQ() const { 01446 if (!isDoubleSpacedVectorList()) return false; 01447 return VectorList.Count == 3; 01448 } 01449 01450 bool isVecListFourQ() const { 01451 if (!isDoubleSpacedVectorList()) return false; 01452 return VectorList.Count == 4; 01453 } 01454 01455 bool isSingleSpacedVectorAllLanes() const { 01456 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced; 01457 } 01458 bool isDoubleSpacedVectorAllLanes() const { 01459 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced; 01460 } 01461 bool isVecListOneDAllLanes() const { 01462 if (!isSingleSpacedVectorAllLanes()) return false; 01463 return VectorList.Count == 1; 01464 } 01465 01466 bool isVecListDPairAllLanes() const { 01467 if (!isSingleSpacedVectorAllLanes()) return false; 01468 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 01469 .contains(VectorList.RegNum)); 01470 } 01471 01472 bool isVecListDPairSpacedAllLanes() const { 01473 if (!isDoubleSpacedVectorAllLanes()) return false; 01474 return VectorList.Count == 2; 01475 } 01476 01477 bool isVecListThreeDAllLanes() const { 01478 if (!isSingleSpacedVectorAllLanes()) return false; 01479 return VectorList.Count == 3; 01480 } 01481 01482 bool isVecListThreeQAllLanes() const { 01483 if (!isDoubleSpacedVectorAllLanes()) return false; 01484 return VectorList.Count == 3; 01485 } 01486 01487 bool isVecListFourDAllLanes() const { 01488 if (!isSingleSpacedVectorAllLanes()) return false; 01489 return 
VectorList.Count == 4; 01490 } 01491 01492 bool isVecListFourQAllLanes() const { 01493 if (!isDoubleSpacedVectorAllLanes()) return false; 01494 return VectorList.Count == 4; 01495 } 01496 01497 bool isSingleSpacedVectorIndexed() const { 01498 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced; 01499 } 01500 bool isDoubleSpacedVectorIndexed() const { 01501 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced; 01502 } 01503 bool isVecListOneDByteIndexed() const { 01504 if (!isSingleSpacedVectorIndexed()) return false; 01505 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 01506 } 01507 01508 bool isVecListOneDHWordIndexed() const { 01509 if (!isSingleSpacedVectorIndexed()) return false; 01510 return VectorList.Count == 1 && VectorList.LaneIndex <= 3; 01511 } 01512 01513 bool isVecListOneDWordIndexed() const { 01514 if (!isSingleSpacedVectorIndexed()) return false; 01515 return VectorList.Count == 1 && VectorList.LaneIndex <= 1; 01516 } 01517 01518 bool isVecListTwoDByteIndexed() const { 01519 if (!isSingleSpacedVectorIndexed()) return false; 01520 return VectorList.Count == 2 && VectorList.LaneIndex <= 7; 01521 } 01522 01523 bool isVecListTwoDHWordIndexed() const { 01524 if (!isSingleSpacedVectorIndexed()) return false; 01525 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 01526 } 01527 01528 bool isVecListTwoQWordIndexed() const { 01529 if (!isDoubleSpacedVectorIndexed()) return false; 01530 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 01531 } 01532 01533 bool isVecListTwoQHWordIndexed() const { 01534 if (!isDoubleSpacedVectorIndexed()) return false; 01535 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 01536 } 01537 01538 bool isVecListTwoDWordIndexed() const { 01539 if (!isSingleSpacedVectorIndexed()) return false; 01540 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 01541 } 01542 01543 bool isVecListThreeDByteIndexed() const { 01544 if (!isSingleSpacedVectorIndexed()) return false; 01545 return VectorList.Count == 3 && VectorList.LaneIndex <= 7; 01546 } 01547 01548 bool isVecListThreeDHWordIndexed() const { 01549 if (!isSingleSpacedVectorIndexed()) return false; 01550 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 01551 } 01552 01553 bool isVecListThreeQWordIndexed() const { 01554 if (!isDoubleSpacedVectorIndexed()) return false; 01555 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 01556 } 01557 01558 bool isVecListThreeQHWordIndexed() const { 01559 if (!isDoubleSpacedVectorIndexed()) return false; 01560 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 01561 } 01562 01563 bool isVecListThreeDWordIndexed() const { 01564 if (!isSingleSpacedVectorIndexed()) return false; 01565 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 01566 } 01567 01568 bool isVecListFourDByteIndexed() const { 01569 if (!isSingleSpacedVectorIndexed()) return false; 01570 return VectorList.Count == 4 && VectorList.LaneIndex <= 7; 01571 } 01572 01573 bool isVecListFourDHWordIndexed() const { 01574 if (!isSingleSpacedVectorIndexed()) return false; 01575 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 01576 } 01577 01578 bool isVecListFourQWordIndexed() const { 01579 if (!isDoubleSpacedVectorIndexed()) return false; 01580 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 01581 } 01582 01583 bool isVecListFourQHWordIndexed() const { 01584 if (!isDoubleSpacedVectorIndexed()) return false; 01585 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 01586 } 01587 01588 bool 
isVecListFourDWordIndexed() const { 01589 if (!isSingleSpacedVectorIndexed()) return false; 01590 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 01591 } 01592 01593 bool isVectorIndex8() const { 01594 if (Kind != k_VectorIndex) return false; 01595 return VectorIndex.Val < 8; 01596 } 01597 bool isVectorIndex16() const { 01598 if (Kind != k_VectorIndex) return false; 01599 return VectorIndex.Val < 4; 01600 } 01601 bool isVectorIndex32() const { 01602 if (Kind != k_VectorIndex) return false; 01603 return VectorIndex.Val < 2; 01604 } 01605 01606 bool isNEONi8splat() const { 01607 if (!isImm()) return false; 01608 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01609 // Must be a constant. 01610 if (!CE) return false; 01611 int64_t Value = CE->getValue(); 01612 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 01613 // value. 01614 return Value >= 0 && Value < 256; 01615 } 01616 01617 bool isNEONi16splat() const { 01618 if (isNEONByteReplicate(2)) 01619 return false; // Leave that for bytes replication and forbid by default. 01620 if (!isImm()) 01621 return false; 01622 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01623 // Must be a constant. 01624 if (!CE) return false; 01625 int64_t Value = CE->getValue(); 01626 // i16 value in the range [0,255] or [0x0100, 0xff00] 01627 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 01628 } 01629 01630 bool isNEONi32splat() const { 01631 if (isNEONByteReplicate(4)) 01632 return false; // Leave that for bytes replication and forbid by default. 01633 if (!isImm()) 01634 return false; 01635 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01636 // Must be a constant. 01637 if (!CE) return false; 01638 int64_t Value = CE->getValue(); 01639 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 01640 return (Value >= 0 && Value < 256) || 01641 (Value >= 0x0100 && Value <= 0xff00) || 01642 (Value >= 0x010000 && Value <= 0xff0000) || 01643 (Value >= 0x01000000 && Value <= 0xff000000); 01644 } 01645 01646 bool isNEONByteReplicate(unsigned NumBytes) const { 01647 if (!isImm()) 01648 return false; 01649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01650 // Must be a constant. 01651 if (!CE) 01652 return false; 01653 int64_t Value = CE->getValue(); 01654 if (!Value) 01655 return false; // Don't bother with zero. 01656 01657 unsigned char B = Value & 0xff; 01658 for (unsigned i = 1; i < NumBytes; ++i) { 01659 Value >>= 8; 01660 if ((Value & 0xff) != B) 01661 return false; 01662 } 01663 return true; 01664 } 01665 bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); } 01666 bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); } 01667 bool isNEONi32vmov() const { 01668 if (isNEONByteReplicate(4)) 01669 return false; // Let it to be classified as byte-replicate case. 01670 if (!isImm()) 01671 return false; 01672 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01673 // Must be a constant. 01674 if (!CE) 01675 return false; 01676 int64_t Value = CE->getValue(); 01677 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 01678 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 
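    // (Worked examples of the check below: 0x00ab0000, 0x0000abff and
    // 0x00abffff all pass, since each is either a single set byte or one of
    // the 0x..ff / 0x..ffff forms accepted for VMOV/VMVN.)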
01679 return (Value >= 0 && Value < 256) || 01680 (Value >= 0x0100 && Value <= 0xff00) || 01681 (Value >= 0x010000 && Value <= 0xff0000) || 01682 (Value >= 0x01000000 && Value <= 0xff000000) || 01683 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 01684 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 01685 } 01686 bool isNEONi32vmovNeg() const { 01687 if (!isImm()) return false; 01688 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01689 // Must be a constant. 01690 if (!CE) return false; 01691 int64_t Value = ~CE->getValue(); 01692 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 01693 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 01694 return (Value >= 0 && Value < 256) || 01695 (Value >= 0x0100 && Value <= 0xff00) || 01696 (Value >= 0x010000 && Value <= 0xff0000) || 01697 (Value >= 0x01000000 && Value <= 0xff000000) || 01698 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 01699 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 01700 } 01701 01702 bool isNEONi64splat() const { 01703 if (!isImm()) return false; 01704 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01705 // Must be a constant. 01706 if (!CE) return false; 01707 uint64_t Value = CE->getValue(); 01708 // i64 value with each byte being either 0 or 0xff. 01709 for (unsigned i = 0; i < 8; ++i) 01710 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 01711 return true; 01712 } 01713 01714 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 01715 // Add as immediates when possible. Null MCExpr = 0. 01716 if (!Expr) 01717 Inst.addOperand(MCOperand::CreateImm(0)); 01718 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 01719 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 01720 else 01721 Inst.addOperand(MCOperand::CreateExpr(Expr)); 01722 } 01723 01724 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 01725 assert(N == 2 && "Invalid number of operands!"); 01726 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 01727 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 01728 Inst.addOperand(MCOperand::CreateReg(RegNum)); 01729 } 01730 01731 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 01732 assert(N == 1 && "Invalid number of operands!"); 01733 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 01734 } 01735 01736 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 01737 assert(N == 1 && "Invalid number of operands!"); 01738 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 01739 } 01740 01741 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 01742 assert(N == 1 && "Invalid number of operands!"); 01743 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 01744 } 01745 01746 void addITMaskOperands(MCInst &Inst, unsigned N) const { 01747 assert(N == 1 && "Invalid number of operands!"); 01748 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 01749 } 01750 01751 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 01752 assert(N == 1 && "Invalid number of operands!"); 01753 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 01754 } 01755 01756 void addCCOutOperands(MCInst &Inst, unsigned N) const { 01757 assert(N == 1 && "Invalid number of operands!"); 01758 Inst.addOperand(MCOperand::CreateReg(getReg())); 01759 } 01760 01761 void addRegOperands(MCInst &Inst, unsigned N) const { 01762 assert(N == 1 && "Invalid number of operands!"); 01763 Inst.addOperand(MCOperand::CreateReg(getReg())); 01764 } 01765 01766 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 01767 assert(N == 3 && "Invalid number of operands!"); 01768 assert(isRegShiftedReg() && 01769 "addRegShiftedRegOperands() on non-RegShiftedReg!"); 01770 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 01771 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 01772 Inst.addOperand(MCOperand::CreateImm( 01773 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 01774 } 01775 01776 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 01777 assert(N == 2 && "Invalid number of operands!"); 01778 assert(isRegShiftedImm() && 01779 "addRegShiftedImmOperands() on non-RegShiftedImm!"); 01780 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 01781 // Shift of #32 is encoded as 0 where permitted 01782 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm); 01783 Inst.addOperand(MCOperand::CreateImm( 01784 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm))); 01785 } 01786 01787 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 01788 assert(N == 1 && "Invalid number of operands!"); 01789 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 01790 ShifterImm.Imm)); 01791 } 01792 01793 void addRegListOperands(MCInst &Inst, unsigned N) const { 01794 assert(N == 1 && "Invalid number of operands!"); 01795 const SmallVectorImpl<unsigned> &RegList = getRegList(); 01796 for (SmallVectorImpl<unsigned>::const_iterator 01797 I = RegList.begin(), E = RegList.end(); I != E; ++I) 01798 Inst.addOperand(MCOperand::CreateReg(*I)); 01799 } 01800 01801 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 01802 addRegListOperands(Inst, N); 01803 } 01804 01805 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 01806 addRegListOperands(Inst, N); 01807 } 01808 01809 void addRotImmOperands(MCInst &Inst, unsigned N) const { 01810 assert(N == 1 && "Invalid number of operands!"); 01811 // Encoded as val>>3. The printer handles display as 8, 16, 24. 
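    // (For example, with RotImm.Imm == 16 the operand emitted below is 16 >> 3 == 2.)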
01812 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 01813 } 01814 01815 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 01816 assert(N == 1 && "Invalid number of operands!"); 01817 // Munge the lsb/width into a bitfield mask. 01818 unsigned lsb = Bitfield.LSB; 01819 unsigned width = Bitfield.Width; 01820 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 01821 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 01822 (32 - (lsb + width))); 01823 Inst.addOperand(MCOperand::CreateImm(Mask)); 01824 } 01825 01826 void addImmOperands(MCInst &Inst, unsigned N) const { 01827 assert(N == 1 && "Invalid number of operands!"); 01828 addExpr(Inst, getImm()); 01829 } 01830 01831 void addFBits16Operands(MCInst &Inst, unsigned N) const { 01832 assert(N == 1 && "Invalid number of operands!"); 01833 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01834 Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue())); 01835 } 01836 01837 void addFBits32Operands(MCInst &Inst, unsigned N) const { 01838 assert(N == 1 && "Invalid number of operands!"); 01839 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01840 Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue())); 01841 } 01842 01843 void addFPImmOperands(MCInst &Inst, unsigned N) const { 01844 assert(N == 1 && "Invalid number of operands!"); 01845 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01846 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); 01847 Inst.addOperand(MCOperand::CreateImm(Val)); 01848 } 01849 01850 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 01851 assert(N == 1 && "Invalid number of operands!"); 01852 // FIXME: We really want to scale the value here, but the LDRD/STRD 01853 // instruction don't encode operands that way yet. 01854 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01855 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 01856 } 01857 01858 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 01859 assert(N == 1 && "Invalid number of operands!"); 01860 // The immediate is scaled by four in the encoding and is stored 01861 // in the MCInst as such. Lop off the low two bits here. 01862 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01863 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 01864 } 01865 01866 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const { 01867 assert(N == 1 && "Invalid number of operands!"); 01868 // The immediate is scaled by four in the encoding and is stored 01869 // in the MCInst as such. Lop off the low two bits here. 01870 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01871 Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4))); 01872 } 01873 01874 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 01875 assert(N == 1 && "Invalid number of operands!"); 01876 // The immediate is scaled by four in the encoding and is stored 01877 // in the MCInst as such. Lop off the low two bits here. 01878 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01879 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 01880 } 01881 01882 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 01883 assert(N == 1 && "Invalid number of operands!"); 01884 // The constant encodes as the immediate-1, and we store in the instruction 01885 // the bits as encoded, so subtract off one here. 
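    // (For example, an assembly immediate of #16 is stored in the MCInst as 15.)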
01886 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01887 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 01888 } 01889 01890 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 01891 assert(N == 1 && "Invalid number of operands!"); 01892 // The constant encodes as the immediate-1, and we store in the instruction 01893 // the bits as encoded, so subtract off one here. 01894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01895 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 01896 } 01897 01898 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 01899 assert(N == 1 && "Invalid number of operands!"); 01900 // The constant encodes as the immediate, except for 32, which encodes as 01901 // zero. 01902 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01903 unsigned Imm = CE->getValue(); 01904 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 01905 } 01906 01907 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 01908 assert(N == 1 && "Invalid number of operands!"); 01909 // An ASR value of 32 encodes as 0, so that's how we want to add it to 01910 // the instruction as well. 01911 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01912 int Val = CE->getValue(); 01913 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 01914 } 01915 01916 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 01917 assert(N == 1 && "Invalid number of operands!"); 01918 // The operand is actually a t2_so_imm, but we have its bitwise 01919 // negation in the assembly source, so twiddle it here. 01920 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01921 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 01922 } 01923 01924 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { 01925 assert(N == 1 && "Invalid number of operands!"); 01926 // The operand is actually a t2_so_imm, but we have its 01927 // negation in the assembly source, so twiddle it here. 01928 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01929 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 01930 } 01931 01932 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const { 01933 assert(N == 1 && "Invalid number of operands!"); 01934 // The operand is actually an imm0_4095, but we have its 01935 // negation in the assembly source, so twiddle it here. 
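    // (For example, a source operand written as #-100 is added to the MCInst as 100.)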
01936 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01937 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 01938 } 01939 01940 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const { 01941 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) { 01942 Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2)); 01943 return; 01944 } 01945 01946 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); 01947 assert(SR && "Unknown value type!"); 01948 Inst.addOperand(MCOperand::CreateExpr(SR)); 01949 } 01950 01951 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const { 01952 assert(N == 1 && "Invalid number of operands!"); 01953 if (isImm()) { 01954 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01955 if (CE) { 01956 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 01957 return; 01958 } 01959 01960 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); 01961 assert(SR && "Unknown value type!"); 01962 Inst.addOperand(MCOperand::CreateExpr(SR)); 01963 return; 01964 } 01965 01966 assert(isMem() && "Unknown value type!"); 01967 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!"); 01968 Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue())); 01969 } 01970 01971 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 01972 assert(N == 1 && "Invalid number of operands!"); 01973 // The operand is actually a so_imm, but we have its bitwise 01974 // negation in the assembly source, so twiddle it here. 01975 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01976 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 01977 } 01978 01979 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const { 01980 assert(N == 1 && "Invalid number of operands!"); 01981 // The operand is actually a so_imm, but we have its 01982 // negation in the assembly source, so twiddle it here. 01983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 01984 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 01985 } 01986 01987 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 01988 assert(N == 1 && "Invalid number of operands!"); 01989 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 01990 } 01991 01992 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const { 01993 assert(N == 1 && "Invalid number of operands!"); 01994 Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt()))); 01995 } 01996 01997 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 01998 assert(N == 1 && "Invalid number of operands!"); 01999 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02000 } 02001 02002 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const { 02003 assert(N == 1 && "Invalid number of operands!"); 02004 int32_t Imm = Memory.OffsetImm->getValue(); 02005 Inst.addOperand(MCOperand::CreateImm(Imm)); 02006 } 02007 02008 void addAdrLabelOperands(MCInst &Inst, unsigned N) const { 02009 assert(N == 1 && "Invalid number of operands!"); 02010 assert(isImm() && "Not an immediate!"); 02011 02012 // If we have an immediate that's not a constant, treat it as a label 02013 // reference needing a fixup. 
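    // (For instance, an 'adr' whose target is still an unresolved symbol, rather
    // than a folded constant, ends up on the expression path below.)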
02014 if (!isa<MCConstantExpr>(getImm())) { 02015 Inst.addOperand(MCOperand::CreateExpr(getImm())); 02016 return; 02017 } 02018 02019 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02020 int Val = CE->getValue(); 02021 Inst.addOperand(MCOperand::CreateImm(Val)); 02022 } 02023 02024 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 02025 assert(N == 2 && "Invalid number of operands!"); 02026 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02027 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 02028 } 02029 02030 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const { 02031 addAlignedMemoryOperands(Inst, N); 02032 } 02033 02034 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const { 02035 addAlignedMemoryOperands(Inst, N); 02036 } 02037 02038 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const { 02039 addAlignedMemoryOperands(Inst, N); 02040 } 02041 02042 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const { 02043 addAlignedMemoryOperands(Inst, N); 02044 } 02045 02046 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const { 02047 addAlignedMemoryOperands(Inst, N); 02048 } 02049 02050 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const { 02051 addAlignedMemoryOperands(Inst, N); 02052 } 02053 02054 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const { 02055 addAlignedMemoryOperands(Inst, N); 02056 } 02057 02058 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const { 02059 addAlignedMemoryOperands(Inst, N); 02060 } 02061 02062 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const { 02063 addAlignedMemoryOperands(Inst, N); 02064 } 02065 02066 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const { 02067 addAlignedMemoryOperands(Inst, N); 02068 } 02069 02070 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const { 02071 addAlignedMemoryOperands(Inst, N); 02072 } 02073 02074 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 02075 assert(N == 3 && "Invalid number of operands!"); 02076 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 02077 if (!Memory.OffsetRegNum) { 02078 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 02079 // Special case for #-0 02080 if (Val == INT32_MIN) Val = 0; 02081 if (Val < 0) Val = -Val; 02082 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 02083 } else { 02084 // For register offset, we encode the shift type and negation flag 02085 // here. 02086 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 02087 Memory.ShiftImm, Memory.ShiftType); 02088 } 02089 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02090 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02091 Inst.addOperand(MCOperand::CreateImm(Val)); 02092 } 02093 02094 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 02095 assert(N == 2 && "Invalid number of operands!"); 02096 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02097 assert(CE && "non-constant AM2OffsetImm operand!"); 02098 int32_t Val = CE->getValue(); 02099 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 02100 // Special case for #-0 02101 if (Val == INT32_MIN) Val = 0; 02102 if (Val < 0) Val = -Val; 02103 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 02104 Inst.addOperand(MCOperand::CreateReg(0)); 02105 Inst.addOperand(MCOperand::CreateImm(Val)); 02106 } 02107 02108 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 02109 assert(N == 3 && "Invalid number of operands!"); 02110 // If we have an immediate that's not a constant, treat it as a label 02111 // reference needing a fixup. If it is a constant, it's something else 02112 // and we reject it. 02113 if (isImm()) { 02114 Inst.addOperand(MCOperand::CreateExpr(getImm())); 02115 Inst.addOperand(MCOperand::CreateReg(0)); 02116 Inst.addOperand(MCOperand::CreateImm(0)); 02117 return; 02118 } 02119 02120 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 02121 if (!Memory.OffsetRegNum) { 02122 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 02123 // Special case for #-0 02124 if (Val == INT32_MIN) Val = 0; 02125 if (Val < 0) Val = -Val; 02126 Val = ARM_AM::getAM3Opc(AddSub, Val); 02127 } else { 02128 // For register offset, we encode the shift type and negation flag 02129 // here. 02130 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 02131 } 02132 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02133 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02134 Inst.addOperand(MCOperand::CreateImm(Val)); 02135 } 02136 02137 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 02138 assert(N == 2 && "Invalid number of operands!"); 02139 if (Kind == k_PostIndexRegister) { 02140 int32_t Val = 02141 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 02142 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 02143 Inst.addOperand(MCOperand::CreateImm(Val)); 02144 return; 02145 } 02146 02147 // Constant offset. 02148 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 02149 int32_t Val = CE->getValue(); 02150 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 02151 // Special case for #-0 02152 if (Val == INT32_MIN) Val = 0; 02153 if (Val < 0) Val = -Val; 02154 Val = ARM_AM::getAM3Opc(AddSub, Val); 02155 Inst.addOperand(MCOperand::CreateReg(0)); 02156 Inst.addOperand(MCOperand::CreateImm(Val)); 02157 } 02158 02159 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 02160 assert(N == 2 && "Invalid number of operands!"); 02161 // If we have an immediate that's not a constant, treat it as a label 02162 // reference needing a fixup. If it is a constant, it's something else 02163 // and we reject it. 02164 if (isImm()) { 02165 Inst.addOperand(MCOperand::CreateExpr(getImm())); 02166 Inst.addOperand(MCOperand::CreateImm(0)); 02167 return; 02168 } 02169 02170 // The lower two bits are always zero and as such are not encoded. 02171 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 02172 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 02173 // Special case for #-0 02174 if (Val == INT32_MIN) Val = 0; 02175 if (Val < 0) Val = -Val; 02176 Val = ARM_AM::getAM5Opc(AddSub, Val); 02177 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02178 Inst.addOperand(MCOperand::CreateImm(Val)); 02179 } 02180 02181 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 02182 assert(N == 2 && "Invalid number of operands!"); 02183 // If we have an immediate that's not a constant, treat it as a label 02184 // reference needing a fixup. 
If it is a constant, it's something else 02185 // and we reject it. 02186 if (isImm()) { 02187 Inst.addOperand(MCOperand::CreateExpr(getImm())); 02188 Inst.addOperand(MCOperand::CreateImm(0)); 02189 return; 02190 } 02191 02192 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 02193 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02194 Inst.addOperand(MCOperand::CreateImm(Val)); 02195 } 02196 02197 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 02198 assert(N == 2 && "Invalid number of operands!"); 02199 // The lower two bits are always zero and as such are not encoded. 02200 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 02201 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02202 Inst.addOperand(MCOperand::CreateImm(Val)); 02203 } 02204 02205 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 02206 assert(N == 2 && "Invalid number of operands!"); 02207 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 02208 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02209 Inst.addOperand(MCOperand::CreateImm(Val)); 02210 } 02211 02212 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 02213 addMemImm8OffsetOperands(Inst, N); 02214 } 02215 02216 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 02217 addMemImm8OffsetOperands(Inst, N); 02218 } 02219 02220 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 02221 assert(N == 2 && "Invalid number of operands!"); 02222 // If this is an immediate, it's a label reference. 02223 if (isImm()) { 02224 addExpr(Inst, getImm()); 02225 Inst.addOperand(MCOperand::CreateImm(0)); 02226 return; 02227 } 02228 02229 // Otherwise, it's a normal memory reg+offset. 02230 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 02231 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02232 Inst.addOperand(MCOperand::CreateImm(Val)); 02233 } 02234 02235 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 02236 assert(N == 2 && "Invalid number of operands!"); 02237 // If this is an immediate, it's a label reference. 02238 if (isImm()) { 02239 addExpr(Inst, getImm()); 02240 Inst.addOperand(MCOperand::CreateImm(0)); 02241 return; 02242 } 02243 02244 // Otherwise, it's a normal memory reg+offset. 02245 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 02246 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02247 Inst.addOperand(MCOperand::CreateImm(Val)); 02248 } 02249 02250 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 02251 assert(N == 2 && "Invalid number of operands!"); 02252 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02253 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02254 } 02255 02256 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 02257 assert(N == 2 && "Invalid number of operands!"); 02258 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02259 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02260 } 02261 02262 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 02263 assert(N == 3 && "Invalid number of operands!"); 02264 unsigned Val = 02265 ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 02266 Memory.ShiftImm, Memory.ShiftType); 02267 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02268 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02269 Inst.addOperand(MCOperand::CreateImm(Val)); 02270 } 02271 02272 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 02273 assert(N == 3 && "Invalid number of operands!"); 02274 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02275 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02276 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 02277 } 02278 02279 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 02280 assert(N == 2 && "Invalid number of operands!"); 02281 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02282 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 02283 } 02284 02285 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 02286 assert(N == 2 && "Invalid number of operands!"); 02287 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 02288 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02289 Inst.addOperand(MCOperand::CreateImm(Val)); 02290 } 02291 02292 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 02293 assert(N == 2 && "Invalid number of operands!"); 02294 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 02295 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02296 Inst.addOperand(MCOperand::CreateImm(Val)); 02297 } 02298 02299 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 02300 assert(N == 2 && "Invalid number of operands!"); 02301 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 02302 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02303 Inst.addOperand(MCOperand::CreateImm(Val)); 02304 } 02305 02306 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 02307 assert(N == 2 && "Invalid number of operands!"); 02308 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 02309 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 02310 Inst.addOperand(MCOperand::CreateImm(Val)); 02311 } 02312 02313 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 02314 assert(N == 1 && "Invalid number of operands!"); 02315 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02316 assert(CE && "non-constant post-idx-imm8 operand!"); 02317 int Imm = CE->getValue(); 02318 bool isAdd = Imm >= 0; 02319 if (Imm == INT32_MIN) Imm = 0; 02320 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 02321 Inst.addOperand(MCOperand::CreateImm(Imm)); 02322 } 02323 02324 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 02325 assert(N == 1 && "Invalid number of operands!"); 02326 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02327 assert(CE && "non-constant post-idx-imm8s4 operand!"); 02328 int Imm = CE->getValue(); 02329 bool isAdd = Imm >= 0; 02330 if (Imm == INT32_MIN) Imm = 0; 02331 // Immediate is scaled by 4. 02332 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 02333 Inst.addOperand(MCOperand::CreateImm(Imm)); 02334 } 02335 02336 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 02337 assert(N == 2 && "Invalid number of operands!"); 02338 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 02339 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 02340 } 02341 02342 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 02343 assert(N == 2 && "Invalid number of operands!"); 02344 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 02345 // The sign, shift type, and shift amount are encoded in a single operand 02346 // using the AM2 encoding helpers. 02347 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 02348 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 02349 PostIdxReg.ShiftTy); 02350 Inst.addOperand(MCOperand::CreateImm(Imm)); 02351 } 02352 02353 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 02354 assert(N == 1 && "Invalid number of operands!"); 02355 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 02356 } 02357 02358 void addBankedRegOperands(MCInst &Inst, unsigned N) const { 02359 assert(N == 1 && "Invalid number of operands!"); 02360 Inst.addOperand(MCOperand::CreateImm(unsigned(getBankedReg()))); 02361 } 02362 02363 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 02364 assert(N == 1 && "Invalid number of operands!"); 02365 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 02366 } 02367 02368 void addVecListOperands(MCInst &Inst, unsigned N) const { 02369 assert(N == 1 && "Invalid number of operands!"); 02370 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 02371 } 02372 02373 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const { 02374 assert(N == 2 && "Invalid number of operands!"); 02375 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 02376 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex)); 02377 } 02378 02379 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 02380 assert(N == 1 && "Invalid number of operands!"); 02381 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 02382 } 02383 02384 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 02385 assert(N == 1 && "Invalid number of operands!"); 02386 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 02387 } 02388 02389 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 02390 assert(N == 1 && "Invalid number of operands!"); 02391 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 02392 } 02393 02394 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 02395 assert(N == 1 && "Invalid number of operands!"); 02396 // The immediate encodes the type of constant as well as the value. 02397 // Mask in that this is an i8 splat. 02398 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02399 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 02400 } 02401 02402 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 02403 assert(N == 1 && "Invalid number of operands!"); 02404 // The immediate encodes the type of constant as well as the value. 
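    // For example, 0x2a is encoded as 0x82a and 0x2a00 as 0xa2a; the 0x800/0xa00
    // bits select the cmode, cf. the explicit 'cmode = 0b1110' cases further down.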
02405 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02406 unsigned Value = CE->getValue(); 02407 if (Value >= 256) 02408 Value = (Value >> 8) | 0xa00; 02409 else 02410 Value |= 0x800; 02411 Inst.addOperand(MCOperand::CreateImm(Value)); 02412 } 02413 02414 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 02415 assert(N == 1 && "Invalid number of operands!"); 02416 // The immediate encodes the type of constant as well as the value. 02417 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02418 unsigned Value = CE->getValue(); 02419 if (Value >= 256 && Value <= 0xff00) 02420 Value = (Value >> 8) | 0x200; 02421 else if (Value > 0xffff && Value <= 0xff0000) 02422 Value = (Value >> 16) | 0x400; 02423 else if (Value > 0xffffff) 02424 Value = (Value >> 24) | 0x600; 02425 Inst.addOperand(MCOperand::CreateImm(Value)); 02426 } 02427 02428 void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const { 02429 assert(N == 1 && "Invalid number of operands!"); 02430 // The immediate encodes the type of constant as well as the value. 02431 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02432 unsigned Value = CE->getValue(); 02433 assert((Inst.getOpcode() == ARM::VMOVv8i8 || 02434 Inst.getOpcode() == ARM::VMOVv16i8) && 02435 "All vmvn instructions that wants to replicate non-zero byte " 02436 "always must be replaced with VMOVv8i8 or VMOVv16i8."); 02437 unsigned B = ((~Value) & 0xff); 02438 B |= 0xe00; // cmode = 0b1110 02439 Inst.addOperand(MCOperand::CreateImm(B)); 02440 } 02441 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 02442 assert(N == 1 && "Invalid number of operands!"); 02443 // The immediate encodes the type of constant as well as the value. 02444 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02445 unsigned Value = CE->getValue(); 02446 if (Value >= 256 && Value <= 0xffff) 02447 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 02448 else if (Value > 0xffff && Value <= 0xffffff) 02449 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 02450 else if (Value > 0xffffff) 02451 Value = (Value >> 24) | 0x600; 02452 Inst.addOperand(MCOperand::CreateImm(Value)); 02453 } 02454 02455 void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const { 02456 assert(N == 1 && "Invalid number of operands!"); 02457 // The immediate encodes the type of constant as well as the value. 02458 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02459 unsigned Value = CE->getValue(); 02460 assert((Inst.getOpcode() == ARM::VMOVv8i8 || 02461 Inst.getOpcode() == ARM::VMOVv16i8) && 02462 "All instructions that wants to replicate non-zero byte " 02463 "always must be replaced with VMOVv8i8 or VMOVv16i8."); 02464 unsigned B = Value & 0xff; 02465 B |= 0xe00; // cmode = 0b1110 02466 Inst.addOperand(MCOperand::CreateImm(B)); 02467 } 02468 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const { 02469 assert(N == 1 && "Invalid number of operands!"); 02470 // The immediate encodes the type of constant as well as the value. 02471 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02472 unsigned Value = ~CE->getValue(); 02473 if (Value >= 256 && Value <= 0xffff) 02474 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 02475 else if (Value > 0xffff && Value <= 0xffffff) 02476 Value = (Value >> 16) | ((Value & 0xff) ? 
0xd00 : 0x400); 02477 else if (Value > 0xffffff) 02478 Value = (Value >> 24) | 0x600; 02479 Inst.addOperand(MCOperand::CreateImm(Value)); 02480 } 02481 02482 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 02483 assert(N == 1 && "Invalid number of operands!"); 02484 // The immediate encodes the type of constant as well as the value. 02485 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 02486 uint64_t Value = CE->getValue(); 02487 unsigned Imm = 0; 02488 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 02489 Imm |= (Value & 1) << i; 02490 } 02491 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 02492 } 02493 02494 void print(raw_ostream &OS) const override; 02495 02496 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) { 02497 auto Op = make_unique<ARMOperand>(k_ITCondMask); 02498 Op->ITMask.Mask = Mask; 02499 Op->StartLoc = S; 02500 Op->EndLoc = S; 02501 return Op; 02502 } 02503 02504 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC, 02505 SMLoc S) { 02506 auto Op = make_unique<ARMOperand>(k_CondCode); 02507 Op->CC.Val = CC; 02508 Op->StartLoc = S; 02509 Op->EndLoc = S; 02510 return Op; 02511 } 02512 02513 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) { 02514 auto Op = make_unique<ARMOperand>(k_CoprocNum); 02515 Op->Cop.Val = CopVal; 02516 Op->StartLoc = S; 02517 Op->EndLoc = S; 02518 return Op; 02519 } 02520 02521 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) { 02522 auto Op = make_unique<ARMOperand>(k_CoprocReg); 02523 Op->Cop.Val = CopVal; 02524 Op->StartLoc = S; 02525 Op->EndLoc = S; 02526 return Op; 02527 } 02528 02529 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S, 02530 SMLoc E) { 02531 auto Op = make_unique<ARMOperand>(k_CoprocOption); 02532 Op->Cop.Val = Val; 02533 Op->StartLoc = S; 02534 Op->EndLoc = E; 02535 return Op; 02536 } 02537 02538 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) { 02539 auto Op = make_unique<ARMOperand>(k_CCOut); 02540 Op->Reg.RegNum = RegNum; 02541 Op->StartLoc = S; 02542 Op->EndLoc = S; 02543 return Op; 02544 } 02545 02546 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) { 02547 auto Op = make_unique<ARMOperand>(k_Token); 02548 Op->Tok.Data = Str.data(); 02549 Op->Tok.Length = Str.size(); 02550 Op->StartLoc = S; 02551 Op->EndLoc = S; 02552 return Op; 02553 } 02554 02555 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S, 02556 SMLoc E) { 02557 auto Op = make_unique<ARMOperand>(k_Register); 02558 Op->Reg.RegNum = RegNum; 02559 Op->StartLoc = S; 02560 Op->EndLoc = E; 02561 return Op; 02562 } 02563 02564 static std::unique_ptr<ARMOperand> 02565 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, 02566 unsigned ShiftReg, unsigned ShiftImm, SMLoc S, 02567 SMLoc E) { 02568 auto Op = make_unique<ARMOperand>(k_ShiftedRegister); 02569 Op->RegShiftedReg.ShiftTy = ShTy; 02570 Op->RegShiftedReg.SrcReg = SrcReg; 02571 Op->RegShiftedReg.ShiftReg = ShiftReg; 02572 Op->RegShiftedReg.ShiftImm = ShiftImm; 02573 Op->StartLoc = S; 02574 Op->EndLoc = E; 02575 return Op; 02576 } 02577 02578 static std::unique_ptr<ARMOperand> 02579 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, 02580 unsigned ShiftImm, SMLoc S, SMLoc E) { 02581 auto Op = make_unique<ARMOperand>(k_ShiftedImmediate); 02582 Op->RegShiftedImm.ShiftTy = ShTy; 02583 Op->RegShiftedImm.SrcReg = SrcReg; 02584 Op->RegShiftedImm.ShiftImm = ShiftImm; 02585 
Op->StartLoc = S; 02586 Op->EndLoc = E; 02587 return Op; 02588 } 02589 02590 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm, 02591 SMLoc S, SMLoc E) { 02592 auto Op = make_unique<ARMOperand>(k_ShifterImmediate); 02593 Op->ShifterImm.isASR = isASR; 02594 Op->ShifterImm.Imm = Imm; 02595 Op->StartLoc = S; 02596 Op->EndLoc = E; 02597 return Op; 02598 } 02599 02600 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S, 02601 SMLoc E) { 02602 auto Op = make_unique<ARMOperand>(k_RotateImmediate); 02603 Op->RotImm.Imm = Imm; 02604 Op->StartLoc = S; 02605 Op->EndLoc = E; 02606 return Op; 02607 } 02608 02609 static std::unique_ptr<ARMOperand> 02610 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) { 02611 auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor); 02612 Op->Bitfield.LSB = LSB; 02613 Op->Bitfield.Width = Width; 02614 Op->StartLoc = S; 02615 Op->EndLoc = E; 02616 return Op; 02617 } 02618 02619 static std::unique_ptr<ARMOperand> 02620 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs, 02621 SMLoc StartLoc, SMLoc EndLoc) { 02622 assert (Regs.size() > 0 && "RegList contains no registers?"); 02623 KindTy Kind = k_RegisterList; 02624 02625 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second)) 02626 Kind = k_DPRRegisterList; 02627 else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. 02628 contains(Regs.front().second)) 02629 Kind = k_SPRRegisterList; 02630 02631 // Sort based on the register encoding values. 02632 array_pod_sort(Regs.begin(), Regs.end()); 02633 02634 auto Op = make_unique<ARMOperand>(Kind); 02635 for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator 02636 I = Regs.begin(), E = Regs.end(); I != E; ++I) 02637 Op->Registers.push_back(I->second); 02638 Op->StartLoc = StartLoc; 02639 Op->EndLoc = EndLoc; 02640 return Op; 02641 } 02642 02643 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum, 02644 unsigned Count, 02645 bool isDoubleSpaced, 02646 SMLoc S, SMLoc E) { 02647 auto Op = make_unique<ARMOperand>(k_VectorList); 02648 Op->VectorList.RegNum = RegNum; 02649 Op->VectorList.Count = Count; 02650 Op->VectorList.isDoubleSpaced = isDoubleSpaced; 02651 Op->StartLoc = S; 02652 Op->EndLoc = E; 02653 return Op; 02654 } 02655 02656 static std::unique_ptr<ARMOperand> 02657 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced, 02658 SMLoc S, SMLoc E) { 02659 auto Op = make_unique<ARMOperand>(k_VectorListAllLanes); 02660 Op->VectorList.RegNum = RegNum; 02661 Op->VectorList.Count = Count; 02662 Op->VectorList.isDoubleSpaced = isDoubleSpaced; 02663 Op->StartLoc = S; 02664 Op->EndLoc = E; 02665 return Op; 02666 } 02667 02668 static std::unique_ptr<ARMOperand> 02669 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index, 02670 bool isDoubleSpaced, SMLoc S, SMLoc E) { 02671 auto Op = make_unique<ARMOperand>(k_VectorListIndexed); 02672 Op->VectorList.RegNum = RegNum; 02673 Op->VectorList.Count = Count; 02674 Op->VectorList.LaneIndex = Index; 02675 Op->VectorList.isDoubleSpaced = isDoubleSpaced; 02676 Op->StartLoc = S; 02677 Op->EndLoc = E; 02678 return Op; 02679 } 02680 02681 static std::unique_ptr<ARMOperand> 02682 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { 02683 auto Op = make_unique<ARMOperand>(k_VectorIndex); 02684 Op->VectorIndex.Val = Idx; 02685 Op->StartLoc = S; 02686 Op->EndLoc = E; 02687 return Op; 02688 } 02689 02690 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S, 02691 
SMLoc E) { 02692 auto Op = make_unique<ARMOperand>(k_Immediate); 02693 Op->Imm.Val = Val; 02694 Op->StartLoc = S; 02695 Op->EndLoc = E; 02696 return Op; 02697 } 02698 02699 static std::unique_ptr<ARMOperand> 02700 CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm, 02701 unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType, 02702 unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S, 02703 SMLoc E, SMLoc AlignmentLoc = SMLoc()) { 02704 auto Op = make_unique<ARMOperand>(k_Memory); 02705 Op->Memory.BaseRegNum = BaseRegNum; 02706 Op->Memory.OffsetImm = OffsetImm; 02707 Op->Memory.OffsetRegNum = OffsetRegNum; 02708 Op->Memory.ShiftType = ShiftType; 02709 Op->Memory.ShiftImm = ShiftImm; 02710 Op->Memory.Alignment = Alignment; 02711 Op->Memory.isNegative = isNegative; 02712 Op->StartLoc = S; 02713 Op->EndLoc = E; 02714 Op->AlignmentLoc = AlignmentLoc; 02715 return Op; 02716 } 02717 02718 static std::unique_ptr<ARMOperand> 02719 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy, 02720 unsigned ShiftImm, SMLoc S, SMLoc E) { 02721 auto Op = make_unique<ARMOperand>(k_PostIndexRegister); 02722 Op->PostIdxReg.RegNum = RegNum; 02723 Op->PostIdxReg.isAdd = isAdd; 02724 Op->PostIdxReg.ShiftTy = ShiftTy; 02725 Op->PostIdxReg.ShiftImm = ShiftImm; 02726 Op->StartLoc = S; 02727 Op->EndLoc = E; 02728 return Op; 02729 } 02730 02731 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, 02732 SMLoc S) { 02733 auto Op = make_unique<ARMOperand>(k_MemBarrierOpt); 02734 Op->MBOpt.Val = Opt; 02735 Op->StartLoc = S; 02736 Op->EndLoc = S; 02737 return Op; 02738 } 02739 02740 static std::unique_ptr<ARMOperand> 02741 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) { 02742 auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt); 02743 Op->ISBOpt.Val = Opt; 02744 Op->StartLoc = S; 02745 Op->EndLoc = S; 02746 return Op; 02747 } 02748 02749 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags, 02750 SMLoc S) { 02751 auto Op = make_unique<ARMOperand>(k_ProcIFlags); 02752 Op->IFlags.Val = IFlags; 02753 Op->StartLoc = S; 02754 Op->EndLoc = S; 02755 return Op; 02756 } 02757 02758 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) { 02759 auto Op = make_unique<ARMOperand>(k_MSRMask); 02760 Op->MMask.Val = MMask; 02761 Op->StartLoc = S; 02762 Op->EndLoc = S; 02763 return Op; 02764 } 02765 02766 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) { 02767 auto Op = make_unique<ARMOperand>(k_BankedReg); 02768 Op->BankedReg.Val = Reg; 02769 Op->StartLoc = S; 02770 Op->EndLoc = S; 02771 return Op; 02772 } 02773 }; 02774 02775 } // end anonymous namespace. 
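// Illustrative aside (not part of ARMAsmParser.cpp): a minimal, standalone
// sketch of the byte-replication test used by isNEONByteReplicate() and of the
// cmode = 0b1110 immediate packing performed by the
// addNEON*ByteReplicateOperands() helpers above. Plain C++ only; the names
// below are illustrative and are not part of the parser.
#include <cassert>
#include <cstdint>

// True if the low NumBytes bytes of Value all hold the same non-zero byte,
// mirroring the loop in isNEONByteReplicate(). Zero is rejected here because
// the splat predicates handle it separately.
static bool isByteReplicate(int64_t Value, unsigned NumBytes) {
  if (!Value)
    return false;
  unsigned char B = Value & 0xff;
  for (unsigned i = 1; i < NumBytes; ++i) {
    Value >>= 8;
    if ((Value & 0xff) != B)
      return false;
  }
  return true;
}

// Pack the replicated byte with cmode = 0b1110 in bits [11:8], as the
// byte-replicate operand helpers do when the instruction has been rewritten
// to VMOVv8i8/VMOVv16i8.
static unsigned packByteReplicateImm(int64_t Value) {
  return (unsigned(Value) & 0xff) | 0xe00;
}

int main() {
  assert(isByteReplicate(0xabab, 2));   // i16 made of two identical bytes.
  assert(!isByteReplicate(0xab00, 2));  // Bytes differ, not a replicate.
  assert(packByteReplicateImm(0xabababab) == 0xeab);
  return 0;
}
// End of illustrative aside.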
02776 02777 void ARMOperand::print(raw_ostream &OS) const { 02778 switch (Kind) { 02779 case k_CondCode: 02780 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 02781 break; 02782 case k_CCOut: 02783 OS << "<ccout " << getReg() << ">"; 02784 break; 02785 case k_ITCondMask: { 02786 static const char *const MaskStr[] = { 02787 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", 02788 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" 02789 }; 02790 assert((ITMask.Mask & 0xf) == ITMask.Mask); 02791 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 02792 break; 02793 } 02794 case k_CoprocNum: 02795 OS << "<coprocessor number: " << getCoproc() << ">"; 02796 break; 02797 case k_CoprocReg: 02798 OS << "<coprocessor register: " << getCoproc() << ">"; 02799 break; 02800 case k_CoprocOption: 02801 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 02802 break; 02803 case k_MSRMask: 02804 OS << "<mask: " << getMSRMask() << ">"; 02805 break; 02806 case k_BankedReg: 02807 OS << "<banked reg: " << getBankedReg() << ">"; 02808 break; 02809 case k_Immediate: 02810 getImm()->print(OS); 02811 break; 02812 case k_MemBarrierOpt: 02813 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">"; 02814 break; 02815 case k_InstSyncBarrierOpt: 02816 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">"; 02817 break; 02818 case k_Memory: 02819 OS << "<memory " 02820 << " base:" << Memory.BaseRegNum; 02821 OS << ">"; 02822 break; 02823 case k_PostIndexRegister: 02824 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") 02825 << PostIdxReg.RegNum; 02826 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 02827 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 02828 << PostIdxReg.ShiftImm; 02829 OS << ">"; 02830 break; 02831 case k_ProcIFlags: { 02832 OS << "<ARM_PROC::"; 02833 unsigned IFlags = getProcIFlags(); 02834 for (int i=2; i >= 0; --i) 02835 if (IFlags & (1 << i)) 02836 OS << ARM_PROC::IFlagsToString(1 << i); 02837 OS << ">"; 02838 break; 02839 } 02840 case k_Register: 02841 OS << "<register " << getReg() << ">"; 02842 break; 02843 case k_ShifterImmediate: 02844 OS << "<shift " << (ShifterImm.isASR ? 
"asr" : "lsl") 02845 << " #" << ShifterImm.Imm << ">"; 02846 break; 02847 case k_ShiftedRegister: 02848 OS << "<so_reg_reg " 02849 << RegShiftedReg.SrcReg << " " 02850 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) 02851 << " " << RegShiftedReg.ShiftReg << ">"; 02852 break; 02853 case k_ShiftedImmediate: 02854 OS << "<so_reg_imm " 02855 << RegShiftedImm.SrcReg << " " 02856 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) 02857 << " #" << RegShiftedImm.ShiftImm << ">"; 02858 break; 02859 case k_RotateImmediate: 02860 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 02861 break; 02862 case k_BitfieldDescriptor: 02863 OS << "<bitfield " << "lsb: " << Bitfield.LSB 02864 << ", width: " << Bitfield.Width << ">"; 02865 break; 02866 case k_RegisterList: 02867 case k_DPRRegisterList: 02868 case k_SPRRegisterList: { 02869 OS << "<register_list "; 02870 02871 const SmallVectorImpl<unsigned> &RegList = getRegList(); 02872 for (SmallVectorImpl<unsigned>::const_iterator 02873 I = RegList.begin(), E = RegList.end(); I != E; ) { 02874 OS << *I; 02875 if (++I < E) OS << ", "; 02876 } 02877 02878 OS << ">"; 02879 break; 02880 } 02881 case k_VectorList: 02882 OS << "<vector_list " << VectorList.Count << " * " 02883 << VectorList.RegNum << ">"; 02884 break; 02885 case k_VectorListAllLanes: 02886 OS << "<vector_list(all lanes) " << VectorList.Count << " * " 02887 << VectorList.RegNum << ">"; 02888 break; 02889 case k_VectorListIndexed: 02890 OS << "<vector_list(lane " << VectorList.LaneIndex << ") " 02891 << VectorList.Count << " * " << VectorList.RegNum << ">"; 02892 break; 02893 case k_Token: 02894 OS << "'" << getToken() << "'"; 02895 break; 02896 case k_VectorIndex: 02897 OS << "<vectorindex " << getVectorIndex() << ">"; 02898 break; 02899 } 02900 } 02901 02902 /// @name Auto-generated Match Functions 02903 /// { 02904 02905 static unsigned MatchRegisterName(StringRef Name); 02906 02907 /// } 02908 02909 bool ARMAsmParser::ParseRegister(unsigned &RegNo, 02910 SMLoc &StartLoc, SMLoc &EndLoc) { 02911 StartLoc = Parser.getTok().getLoc(); 02912 EndLoc = Parser.getTok().getEndLoc(); 02913 RegNo = tryParseRegister(); 02914 02915 return (RegNo == (unsigned)-1); 02916 } 02917 02918 /// Try to parse a register name. The token must be an Identifier when called, 02919 /// and if it is a register name the token is eaten and the register number is 02920 /// returned. Otherwise return -1. 02921 /// 02922 int ARMAsmParser::tryParseRegister() { 02923 const AsmToken &Tok = Parser.getTok(); 02924 if (Tok.isNot(AsmToken::Identifier)) return -1; 02925 02926 std::string lowerCase = Tok.getString().lower(); 02927 unsigned RegNum = MatchRegisterName(lowerCase); 02928 if (!RegNum) { 02929 RegNum = StringSwitch<unsigned>(lowerCase) 02930 .Case("r13", ARM::SP) 02931 .Case("r14", ARM::LR) 02932 .Case("r15", ARM::PC) 02933 .Case("ip", ARM::R12) 02934 // Additional register name aliases for 'gas' compatibility. 02935 .Case("a1", ARM::R0) 02936 .Case("a2", ARM::R1) 02937 .Case("a3", ARM::R2) 02938 .Case("a4", ARM::R3) 02939 .Case("v1", ARM::R4) 02940 .Case("v2", ARM::R5) 02941 .Case("v3", ARM::R6) 02942 .Case("v4", ARM::R7) 02943 .Case("v5", ARM::R8) 02944 .Case("v6", ARM::R9) 02945 .Case("v7", ARM::R10) 02946 .Case("v8", ARM::R11) 02947 .Case("sb", ARM::R9) 02948 .Case("sl", ARM::R10) 02949 .Case("fp", ARM::R11) 02950 .Default(0); 02951 } 02952 if (!RegNum) { 02953 // Check for aliases registered via .req. Canonicalize to lower case. 
02954 // That's more consistent since register names are case insensitive, and 02955 // it's how the original entry was passed in from MC/MCParser/AsmParser. 02956 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase); 02957 // If no match, return failure. 02958 if (Entry == RegisterReqs.end()) 02959 return -1; 02960 Parser.Lex(); // Eat identifier token. 02961 return Entry->getValue(); 02962 } 02963 02964 Parser.Lex(); // Eat identifier token. 02965 02966 return RegNum; 02967 } 02968 02969 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 02970 // If a recoverable error occurs, return 1. If an irrecoverable error 02971 // occurs, return -1. An irrecoverable error is one where tokens have been 02972 // consumed in the process of trying to parse the shifter (i.e., when it is 02973 // indeed a shifter operand, but malformed). 02974 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) { 02975 SMLoc S = Parser.getTok().getLoc(); 02976 const AsmToken &Tok = Parser.getTok(); 02977 if (Tok.isNot(AsmToken::Identifier)) 02978 return -1; 02979 02980 std::string lowerCase = Tok.getString().lower(); 02981 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 02982 .Case("asl", ARM_AM::lsl) 02983 .Case("lsl", ARM_AM::lsl) 02984 .Case("lsr", ARM_AM::lsr) 02985 .Case("asr", ARM_AM::asr) 02986 .Case("ror", ARM_AM::ror) 02987 .Case("rrx", ARM_AM::rrx) 02988 .Default(ARM_AM::no_shift); 02989 02990 if (ShiftTy == ARM_AM::no_shift) 02991 return 1; 02992 02993 Parser.Lex(); // Eat the operator. 02994 02995 // The source register for the shift has already been added to the 02996 // operand list, so we need to pop it off and combine it into the shifted 02997 // register operand instead. 02998 std::unique_ptr<ARMOperand> PrevOp( 02999 (ARMOperand *)Operands.pop_back_val().release()); 03000 if (!PrevOp->isReg()) 03001 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 03002 int SrcReg = PrevOp->getReg(); 03003 03004 SMLoc EndLoc; 03005 int64_t Imm = 0; 03006 int ShiftReg = 0; 03007 if (ShiftTy == ARM_AM::rrx) { 03008 // RRX Doesn't have an explicit shift amount. The encoder expects 03009 // the shift register to be the same as the source register. Seems odd, 03010 // but OK. 03011 ShiftReg = SrcReg; 03012 } else { 03013 // Figure out if this is shifted by a constant or a register (for non-RRX). 03014 if (Parser.getTok().is(AsmToken::Hash) || 03015 Parser.getTok().is(AsmToken::Dollar)) { 03016 Parser.Lex(); // Eat hash. 03017 SMLoc ImmLoc = Parser.getTok().getLoc(); 03018 const MCExpr *ShiftExpr = nullptr; 03019 if (getParser().parseExpression(ShiftExpr, EndLoc)) { 03020 Error(ImmLoc, "invalid immediate shift value"); 03021 return -1; 03022 } 03023 // The expression must be evaluatable as an immediate. 03024 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 03025 if (!CE) { 03026 Error(ImmLoc, "invalid immediate shift value"); 03027 return -1; 03028 } 03029 // Range check the immediate. 03030 // lsl, ror: 0 <= imm <= 31 03031 // lsr, asr: 0 <= imm <= 32 03032 Imm = CE->getValue(); 03033 if (Imm < 0 || 03034 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 03035 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 03036 Error(ImmLoc, "immediate shift value out of range"); 03037 return -1; 03038 } 03039 // shift by zero is a nop. Always send it through as lsl. 
03040 // ('as' compatibility) 03041 if (Imm == 0) 03042 ShiftTy = ARM_AM::lsl; 03043 } else if (Parser.getTok().is(AsmToken::Identifier)) { 03044 SMLoc L = Parser.getTok().getLoc(); 03045 EndLoc = Parser.getTok().getEndLoc(); 03046 ShiftReg = tryParseRegister(); 03047 if (ShiftReg == -1) { 03048 Error(L, "expected immediate or register in shift operand"); 03049 return -1; 03050 } 03051 } else { 03052 Error(Parser.getTok().getLoc(), 03053 "expected immediate or register in shift operand"); 03054 return -1; 03055 } 03056 } 03057 03058 if (ShiftReg && ShiftTy != ARM_AM::rrx) 03059 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 03060 ShiftReg, Imm, 03061 S, EndLoc)); 03062 else 03063 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 03064 S, EndLoc)); 03065 03066 return 0; 03067 } 03068 03069 03070 /// Try to parse a register name. The token must be an Identifier when called. 03071 /// If it's a register, an AsmOperand is created. Another AsmOperand is created 03072 /// if there is a "writeback". 'true' if it's not a register. 03073 /// 03074 /// TODO this is likely to change to allow different register types and or to 03075 /// parse for a specific register type. 03076 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) { 03077 const AsmToken &RegTok = Parser.getTok(); 03078 int RegNo = tryParseRegister(); 03079 if (RegNo == -1) 03080 return true; 03081 03082 Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(), 03083 RegTok.getEndLoc())); 03084 03085 const AsmToken &ExclaimTok = Parser.getTok(); 03086 if (ExclaimTok.is(AsmToken::Exclaim)) { 03087 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 03088 ExclaimTok.getLoc())); 03089 Parser.Lex(); // Eat exclaim token 03090 return false; 03091 } 03092 03093 // Also check for an index operand. This is only legal for vector registers, 03094 // but that'll get caught OK in operand matching, so we don't need to 03095 // explicitly filter everything else out here. 03096 if (Parser.getTok().is(AsmToken::LBrac)) { 03097 SMLoc SIdx = Parser.getTok().getLoc(); 03098 Parser.Lex(); // Eat left bracket token. 03099 03100 const MCExpr *ImmVal; 03101 if (getParser().parseExpression(ImmVal)) 03102 return true; 03103 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 03104 if (!MCE) 03105 return TokError("immediate value expected for vector index"); 03106 03107 if (Parser.getTok().isNot(AsmToken::RBrac)) 03108 return Error(Parser.getTok().getLoc(), "']' expected"); 03109 03110 SMLoc E = Parser.getTok().getEndLoc(); 03111 Parser.Lex(); // Eat right bracket token. 03112 03113 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 03114 SIdx, E, 03115 getContext())); 03116 } 03117 03118 return false; 03119 } 03120 03121 /// MatchCoprocessorOperandName - Try to parse an coprocessor related 03122 /// instruction with a symbolic operand name. 03123 /// We accept "crN" syntax for GAS compatibility. 03124 /// <operand-name> ::= <prefix><number> 03125 /// If CoprocOp is 'c', then: 03126 /// <prefix> ::= c | cr 03127 /// If CoprocOp is 'p', then : 03128 /// <prefix> ::= p 03129 /// <number> ::= integer in range [0, 15] 03130 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 03131 // Use the same layout as the tablegen'erated register name matcher. Ugly, 03132 // but efficient. 03133 if (Name.size() < 2 || Name[0] != CoprocOp) 03134 return -1; 03135 Name = (Name[1] == 'r') ? 
Name.drop_front(2) : Name.drop_front(); 03136 03137 switch (Name.size()) { 03138 default: return -1; 03139 case 1: 03140 switch (Name[0]) { 03141 default: return -1; 03142 case '0': return 0; 03143 case '1': return 1; 03144 case '2': return 2; 03145 case '3': return 3; 03146 case '4': return 4; 03147 case '5': return 5; 03148 case '6': return 6; 03149 case '7': return 7; 03150 case '8': return 8; 03151 case '9': return 9; 03152 } 03153 case 2: 03154 if (Name[0] != '1') 03155 return -1; 03156 switch (Name[1]) { 03157 default: return -1; 03158 // CP10 and CP11 are VFP/NEON and so vector instructions should be used. 03159 // However, old cores (v5/v6) did use them in that way. 03160 case '0': return 10; 03161 case '1': return 11; 03162 case '2': return 12; 03163 case '3': return 13; 03164 case '4': return 14; 03165 case '5': return 15; 03166 } 03167 } 03168 } 03169 03170 /// parseITCondCode - Try to parse a condition code for an IT instruction. 03171 ARMAsmParser::OperandMatchResultTy 03172 ARMAsmParser::parseITCondCode(OperandVector &Operands) { 03173 SMLoc S = Parser.getTok().getLoc(); 03174 const AsmToken &Tok = Parser.getTok(); 03175 if (!Tok.is(AsmToken::Identifier)) 03176 return MatchOperand_NoMatch; 03177 unsigned CC = StringSwitch<unsigned>(Tok.getString().lower()) 03178 .Case("eq", ARMCC::EQ) 03179 .Case("ne", ARMCC::NE) 03180 .Case("hs", ARMCC::HS) 03181 .Case("cs", ARMCC::HS) 03182 .Case("lo", ARMCC::LO) 03183 .Case("cc", ARMCC::LO) 03184 .Case("mi", ARMCC::MI) 03185 .Case("pl", ARMCC::PL) 03186 .Case("vs", ARMCC::VS) 03187 .Case("vc", ARMCC::VC) 03188 .Case("hi", ARMCC::HI) 03189 .Case("ls", ARMCC::LS) 03190 .Case("ge", ARMCC::GE) 03191 .Case("lt", ARMCC::LT) 03192 .Case("gt", ARMCC::GT) 03193 .Case("le", ARMCC::LE) 03194 .Case("al", ARMCC::AL) 03195 .Default(~0U); 03196 if (CC == ~0U) 03197 return MatchOperand_NoMatch; 03198 Parser.Lex(); // Eat the token. 03199 03200 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 03201 03202 return MatchOperand_Success; 03203 } 03204 03205 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The 03206 /// token must be an Identifier when called, and if it is a coprocessor 03207 /// number, the token is eaten and the operand is added to the operand list. 03208 ARMAsmParser::OperandMatchResultTy 03209 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) { 03210 SMLoc S = Parser.getTok().getLoc(); 03211 const AsmToken &Tok = Parser.getTok(); 03212 if (Tok.isNot(AsmToken::Identifier)) 03213 return MatchOperand_NoMatch; 03214 03215 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 03216 if (Num == -1) 03217 return MatchOperand_NoMatch; 03218 // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions 03219 if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11)) 03220 return MatchOperand_NoMatch; 03221 03222 Parser.Lex(); // Eat identifier token. 03223 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 03224 return MatchOperand_Success; 03225 } 03226 03227 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 03228 /// token must be an Identifier when called, and if it is a coprocessor 03229 /// number, the token is eaten and the operand is added to the operand list. 
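/// For illustration (example not in the original comment): in
/// "mrc p15, 0, r0, c1, c0, 0" the "c1" and "c0" operands are parsed here,
/// and the GAS-style "cr1"/"cr0" spellings are accepted as well via
/// MatchCoprocessorOperandName above.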
03230 ARMAsmParser::OperandMatchResultTy 03231 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) { 03232 SMLoc S = Parser.getTok().getLoc(); 03233 const AsmToken &Tok = Parser.getTok(); 03234 if (Tok.isNot(AsmToken::Identifier)) 03235 return MatchOperand_NoMatch; 03236 03237 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 03238 if (Reg == -1) 03239 return MatchOperand_NoMatch; 03240 03241 Parser.Lex(); // Eat identifier token. 03242 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 03243 return MatchOperand_Success; 03244 } 03245 03246 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 03247 /// coproc_option : '{' imm0_255 '}' 03248 ARMAsmParser::OperandMatchResultTy 03249 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) { 03250 SMLoc S = Parser.getTok().getLoc(); 03251 03252 // If this isn't a '{', this isn't a coprocessor immediate operand. 03253 if (Parser.getTok().isNot(AsmToken::LCurly)) 03254 return MatchOperand_NoMatch; 03255 Parser.Lex(); // Eat the '{' 03256 03257 const MCExpr *Expr; 03258 SMLoc Loc = Parser.getTok().getLoc(); 03259 if (getParser().parseExpression(Expr)) { 03260 Error(Loc, "illegal expression"); 03261 return MatchOperand_ParseFail; 03262 } 03263 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 03264 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 03265 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 03266 return MatchOperand_ParseFail; 03267 } 03268 int Val = CE->getValue(); 03269 03270 // Check for and consume the closing '}' 03271 if (Parser.getTok().isNot(AsmToken::RCurly)) 03272 return MatchOperand_ParseFail; 03273 SMLoc E = Parser.getTok().getEndLoc(); 03274 Parser.Lex(); // Eat the '}' 03275 03276 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 03277 return MatchOperand_Success; 03278 } 03279 03280 // For register list parsing, we need to map from raw GPR register numbering 03281 // to the enumeration values. The enumeration values aren't sorted by 03282 // register number due to our using "sp", "lr" and "pc" as canonical names. 03283 static unsigned getNextRegister(unsigned Reg) { 03284 // If this is a GPR, we need to do it manually, otherwise we can rely 03285 // on the sort ordering of the enumeration since the other reg-classes 03286 // are sane. 03287 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 03288 return Reg + 1; 03289 switch(Reg) { 03290 default: llvm_unreachable("Invalid GPR number!"); 03291 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 03292 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 03293 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 03294 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 03295 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 03296 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 03297 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 03298 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 03299 } 03300 } 03301 03302 // Return the low-subreg of a given Q register. 
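// For example, Q3 maps to D6 (Q3 overlaps the pair D6/D7), which is why the
// register-list parsers below expand a Q register into two consecutive D
// registers.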
03303 static unsigned getDRegFromQReg(unsigned QReg) { 03304 switch (QReg) { 03305 default: llvm_unreachable("expected a Q register!"); 03306 case ARM::Q0: return ARM::D0; 03307 case ARM::Q1: return ARM::D2; 03308 case ARM::Q2: return ARM::D4; 03309 case ARM::Q3: return ARM::D6; 03310 case ARM::Q4: return ARM::D8; 03311 case ARM::Q5: return ARM::D10; 03312 case ARM::Q6: return ARM::D12; 03313 case ARM::Q7: return ARM::D14; 03314 case ARM::Q8: return ARM::D16; 03315 case ARM::Q9: return ARM::D18; 03316 case ARM::Q10: return ARM::D20; 03317 case ARM::Q11: return ARM::D22; 03318 case ARM::Q12: return ARM::D24; 03319 case ARM::Q13: return ARM::D26; 03320 case ARM::Q14: return ARM::D28; 03321 case ARM::Q15: return ARM::D30; 03322 } 03323 } 03324 03325 /// Parse a register list. 03326 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) { 03327 assert(Parser.getTok().is(AsmToken::LCurly) && 03328 "Token is not a Left Curly Brace"); 03329 SMLoc S = Parser.getTok().getLoc(); 03330 Parser.Lex(); // Eat '{' token. 03331 SMLoc RegLoc = Parser.getTok().getLoc(); 03332 03333 // Check the first register in the list to see what register class 03334 // this is a list of. 03335 int Reg = tryParseRegister(); 03336 if (Reg == -1) 03337 return Error(RegLoc, "register expected"); 03338 03339 // The reglist instructions have at most 16 registers, so reserve 03340 // space for that many. 03341 int EReg = 0; 03342 SmallVector<std::pair<unsigned, unsigned>, 16> Registers; 03343 03344 // Allow Q regs and just interpret them as the two D sub-registers. 03345 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 03346 Reg = getDRegFromQReg(Reg); 03347 EReg = MRI->getEncodingValue(Reg); 03348 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); 03349 ++Reg; 03350 } 03351 const MCRegisterClass *RC; 03352 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 03353 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 03354 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 03355 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 03356 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 03357 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 03358 else 03359 return Error(RegLoc, "invalid register in register list"); 03360 03361 // Store the register. 03362 EReg = MRI->getEncodingValue(Reg); 03363 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); 03364 03365 // This starts immediately after the first register token in the list, 03366 // so we can see either a comma or a minus (range separator) as a legal 03367 // next token. 03368 while (Parser.getTok().is(AsmToken::Comma) || 03369 Parser.getTok().is(AsmToken::Minus)) { 03370 if (Parser.getTok().is(AsmToken::Minus)) { 03371 Parser.Lex(); // Eat the minus. 03372 SMLoc AfterMinusLoc = Parser.getTok().getLoc(); 03373 int EndReg = tryParseRegister(); 03374 if (EndReg == -1) 03375 return Error(AfterMinusLoc, "register expected"); 03376 // Allow Q regs and just interpret them as the two D sub-registers. 03377 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 03378 EndReg = getDRegFromQReg(EndReg) + 1; 03379 // If the register is the same as the start reg, there's nothing 03380 // more to do. 03381 if (Reg == EndReg) 03382 continue; 03383 // The register must be in the same register class as the first. 03384 if (!RC->contains(EndReg)) 03385 return Error(AfterMinusLoc, "invalid register in register list"); 03386 // Ranges must go from low to high. 
03387 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg)) 03388 return Error(AfterMinusLoc, "bad range in register list"); 03389 03390 // Add all the registers in the range to the register list. 03391 while (Reg != EndReg) { 03392 Reg = getNextRegister(Reg); 03393 EReg = MRI->getEncodingValue(Reg); 03394 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); 03395 } 03396 continue; 03397 } 03398 Parser.Lex(); // Eat the comma. 03399 RegLoc = Parser.getTok().getLoc(); 03400 int OldReg = Reg; 03401 const AsmToken RegTok = Parser.getTok(); 03402 Reg = tryParseRegister(); 03403 if (Reg == -1) 03404 return Error(RegLoc, "register expected"); 03405 // Allow Q regs and just interpret them as the two D sub-registers. 03406 bool isQReg = false; 03407 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 03408 Reg = getDRegFromQReg(Reg); 03409 isQReg = true; 03410 } 03411 // The register must be in the same register class as the first. 03412 if (!RC->contains(Reg)) 03413 return Error(RegLoc, "invalid register in register list"); 03414 // List must be monotonically increasing. 03415 if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) { 03416 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 03417 Warning(RegLoc, "register list not in ascending order"); 03418 else 03419 return Error(RegLoc, "register list not in ascending order"); 03420 } 03421 if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) { 03422 Warning(RegLoc, "duplicated register (" + RegTok.getString() + 03423 ") in register list"); 03424 continue; 03425 } 03426 // VFP register lists must also be contiguous. 03427 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 03428 Reg != OldReg + 1) 03429 return Error(RegLoc, "non-contiguous register range"); 03430 EReg = MRI->getEncodingValue(Reg); 03431 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); 03432 if (isQReg) { 03433 EReg = MRI->getEncodingValue(++Reg); 03434 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); 03435 } 03436 } 03437 03438 if (Parser.getTok().isNot(AsmToken::RCurly)) 03439 return Error(Parser.getTok().getLoc(), "'}' expected"); 03440 SMLoc E = Parser.getTok().getEndLoc(); 03441 Parser.Lex(); // Eat '}' token. 03442 03443 // Push the register list operand. 03444 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 03445 03446 // The ARM system instruction variants for LDM/STM have a '^' token here. 03447 if (Parser.getTok().is(AsmToken::Caret)) { 03448 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc())); 03449 Parser.Lex(); // Eat '^' token. 03450 } 03451 03452 return false; 03453 } 03454 03455 // Helper function to parse the lane index for vector lists. 03456 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 03457 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) { 03458 Index = 0; // Always return a defined index value. 03459 if (Parser.getTok().is(AsmToken::LBrac)) { 03460 Parser.Lex(); // Eat the '['. 03461 if (Parser.getTok().is(AsmToken::RBrac)) { 03462 // "Dn[]" is the 'all lanes' syntax. 03463 LaneKind = AllLanes; 03464 EndLoc = Parser.getTok().getEndLoc(); 03465 Parser.Lex(); // Eat the ']'. 03466 return MatchOperand_Success; 03467 } 03468 03469 // There's an optional '#' token here. Normally there wouldn't be, but 03470 // inline assemble puts one in, and it's friendly to accept that. 03471 if (Parser.getTok().is(AsmToken::Hash)) 03472 Parser.Lex(); // Eat '#' or '$'. 
03473 03474 const MCExpr *LaneIndex; 03475 SMLoc Loc = Parser.getTok().getLoc(); 03476 if (getParser().parseExpression(LaneIndex)) { 03477 Error(Loc, "illegal expression"); 03478 return MatchOperand_ParseFail; 03479 } 03480 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex); 03481 if (!CE) { 03482 Error(Loc, "lane index must be empty or an integer"); 03483 return MatchOperand_ParseFail; 03484 } 03485 if (Parser.getTok().isNot(AsmToken::RBrac)) { 03486 Error(Parser.getTok().getLoc(), "']' expected"); 03487 return MatchOperand_ParseFail; 03488 } 03489 EndLoc = Parser.getTok().getEndLoc(); 03490 Parser.Lex(); // Eat the ']'. 03491 int64_t Val = CE->getValue(); 03492 03493 // FIXME: Make this range check context sensitive for .8, .16, .32. 03494 if (Val < 0 || Val > 7) { 03495 Error(Parser.getTok().getLoc(), "lane index out of range"); 03496 return MatchOperand_ParseFail; 03497 } 03498 Index = Val; 03499 LaneKind = IndexedLane; 03500 return MatchOperand_Success; 03501 } 03502 LaneKind = NoLanes; 03503 return MatchOperand_Success; 03504 } 03505 03506 // parse a vector register list 03507 ARMAsmParser::OperandMatchResultTy 03508 ARMAsmParser::parseVectorList(OperandVector &Operands) { 03509 VectorLaneTy LaneKind; 03510 unsigned LaneIndex; 03511 SMLoc S = Parser.getTok().getLoc(); 03512 // As an extension (to match gas), support a plain D register or Q register 03513 // (without encosing curly braces) as a single or double entry list, 03514 // respectively. 03515 if (Parser.getTok().is(AsmToken::Identifier)) { 03516 SMLoc E = Parser.getTok().getEndLoc(); 03517 int Reg = tryParseRegister(); 03518 if (Reg == -1) 03519 return MatchOperand_NoMatch; 03520 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { 03521 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E); 03522 if (Res != MatchOperand_Success) 03523 return Res; 03524 switch (LaneKind) { 03525 case NoLanes: 03526 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E)); 03527 break; 03528 case AllLanes: 03529 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false, 03530 S, E)); 03531 break; 03532 case IndexedLane: 03533 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, 03534 LaneIndex, 03535 false, S, E)); 03536 break; 03537 } 03538 return MatchOperand_Success; 03539 } 03540 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 03541 Reg = getDRegFromQReg(Reg); 03542 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E); 03543 if (Res != MatchOperand_Success) 03544 return Res; 03545 switch (LaneKind) { 03546 case NoLanes: 03547 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0, 03548 &ARMMCRegisterClasses[ARM::DPairRegClassID]); 03549 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E)); 03550 break; 03551 case AllLanes: 03552 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0, 03553 &ARMMCRegisterClasses[ARM::DPairRegClassID]); 03554 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false, 03555 S, E)); 03556 break; 03557 case IndexedLane: 03558 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, 03559 LaneIndex, 03560 false, S, E)); 03561 break; 03562 } 03563 return MatchOperand_Success; 03564 } 03565 Error(S, "vector register expected"); 03566 return MatchOperand_ParseFail; 03567 } 03568 03569 if (Parser.getTok().isNot(AsmToken::LCurly)) 03570 return MatchOperand_NoMatch; 03571 03572 Parser.Lex(); // Eat '{' token. 
03573 SMLoc RegLoc = Parser.getTok().getLoc(); 03574 03575 int Reg = tryParseRegister(); 03576 if (Reg == -1) { 03577 Error(RegLoc, "register expected"); 03578 return MatchOperand_ParseFail; 03579 } 03580 unsigned Count = 1; 03581 int Spacing = 0; 03582 unsigned FirstReg = Reg; 03583 // The list is of D registers, but we also allow Q regs and just interpret 03584 // them as the two D sub-registers. 03585 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 03586 FirstReg = Reg = getDRegFromQReg(Reg); 03587 Spacing = 1; // double-spacing requires explicit D registers, otherwise 03588 // it's ambiguous with four-register single spaced. 03589 ++Reg; 03590 ++Count; 03591 } 03592 03593 SMLoc E; 03594 if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success) 03595 return MatchOperand_ParseFail; 03596 03597 while (Parser.getTok().is(AsmToken::Comma) || 03598 Parser.getTok().is(AsmToken::Minus)) { 03599 if (Parser.getTok().is(AsmToken::Minus)) { 03600 if (!Spacing) 03601 Spacing = 1; // Register range implies a single spaced list. 03602 else if (Spacing == 2) { 03603 Error(Parser.getTok().getLoc(), 03604 "sequential registers in double spaced list"); 03605 return MatchOperand_ParseFail; 03606 } 03607 Parser.Lex(); // Eat the minus. 03608 SMLoc AfterMinusLoc = Parser.getTok().getLoc(); 03609 int EndReg = tryParseRegister(); 03610 if (EndReg == -1) { 03611 Error(AfterMinusLoc, "register expected"); 03612 return MatchOperand_ParseFail; 03613 } 03614 // Allow Q regs and just interpret them as the two D sub-registers. 03615 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 03616 EndReg = getDRegFromQReg(EndReg) + 1; 03617 // If the register is the same as the start reg, there's nothing 03618 // more to do. 03619 if (Reg == EndReg) 03620 continue; 03621 // The register must be in the same register class as the first. 03622 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) { 03623 Error(AfterMinusLoc, "invalid register in register list"); 03624 return MatchOperand_ParseFail; 03625 } 03626 // Ranges must go from low to high. 03627 if (Reg > EndReg) { 03628 Error(AfterMinusLoc, "bad range in register list"); 03629 return MatchOperand_ParseFail; 03630 } 03631 // Parse the lane specifier if present. 03632 VectorLaneTy NextLaneKind; 03633 unsigned NextLaneIndex; 03634 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != 03635 MatchOperand_Success) 03636 return MatchOperand_ParseFail; 03637 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 03638 Error(AfterMinusLoc, "mismatched lane index in register list"); 03639 return MatchOperand_ParseFail; 03640 } 03641 03642 // Add all the registers in the range to the register list. 03643 Count += EndReg - Reg; 03644 Reg = EndReg; 03645 continue; 03646 } 03647 Parser.Lex(); // Eat the comma. 03648 RegLoc = Parser.getTok().getLoc(); 03649 int OldReg = Reg; 03650 Reg = tryParseRegister(); 03651 if (Reg == -1) { 03652 Error(RegLoc, "register expected"); 03653 return MatchOperand_ParseFail; 03654 } 03655 // vector register lists must be contiguous. 03656 // It's OK to use the enumeration values directly here rather, as the 03657 // VFP register classes have the enum sorted properly. 03658 // 03659 // The list is of D registers, but we also allow Q regs and just interpret 03660 // them as the two D sub-registers. 03661 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 03662 if (!Spacing) 03663 Spacing = 1; // Register range implies a single spaced list. 
03664 else if (Spacing == 2) { 03665 Error(RegLoc, 03666 "invalid register in double-spaced list (must be 'D' register')"); 03667 return MatchOperand_ParseFail; 03668 } 03669 Reg = getDRegFromQReg(Reg); 03670 if (Reg != OldReg + 1) { 03671 Error(RegLoc, "non-contiguous register range"); 03672 return MatchOperand_ParseFail; 03673 } 03674 ++Reg; 03675 Count += 2; 03676 // Parse the lane specifier if present. 03677 VectorLaneTy NextLaneKind; 03678 unsigned NextLaneIndex; 03679 SMLoc LaneLoc = Parser.getTok().getLoc(); 03680 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != 03681 MatchOperand_Success) 03682 return MatchOperand_ParseFail; 03683 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 03684 Error(LaneLoc, "mismatched lane index in register list"); 03685 return MatchOperand_ParseFail; 03686 } 03687 continue; 03688 } 03689 // Normal D register. 03690 // Figure out the register spacing (single or double) of the list if 03691 // we don't know it already. 03692 if (!Spacing) 03693 Spacing = 1 + (Reg == OldReg + 2); 03694 03695 // Just check that it's contiguous and keep going. 03696 if (Reg != OldReg + Spacing) { 03697 Error(RegLoc, "non-contiguous register range"); 03698 return MatchOperand_ParseFail; 03699 } 03700 ++Count; 03701 // Parse the lane specifier if present. 03702 VectorLaneTy NextLaneKind; 03703 unsigned NextLaneIndex; 03704 SMLoc EndLoc = Parser.getTok().getLoc(); 03705 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success) 03706 return MatchOperand_ParseFail; 03707 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 03708 Error(EndLoc, "mismatched lane index in register list"); 03709 return MatchOperand_ParseFail; 03710 } 03711 } 03712 03713 if (Parser.getTok().isNot(AsmToken::RCurly)) { 03714 Error(Parser.getTok().getLoc(), "'}' expected"); 03715 return MatchOperand_ParseFail; 03716 } 03717 E = Parser.getTok().getEndLoc(); 03718 Parser.Lex(); // Eat '}' token. 03719 03720 switch (LaneKind) { 03721 case NoLanes: 03722 // Two-register operands have been converted to the 03723 // composite register classes. 03724 if (Count == 2) { 03725 const MCRegisterClass *RC = (Spacing == 1) ? 03726 &ARMMCRegisterClasses[ARM::DPairRegClassID] : 03727 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID]; 03728 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC); 03729 } 03730 03731 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, 03732 (Spacing == 2), S, E)); 03733 break; 03734 case AllLanes: 03735 // Two-register operands have been converted to the 03736 // composite register classes. 03737 if (Count == 2) { 03738 const MCRegisterClass *RC = (Spacing == 1) ? 03739 &ARMMCRegisterClasses[ARM::DPairRegClassID] : 03740 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID]; 03741 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC); 03742 } 03743 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, 03744 (Spacing == 2), 03745 S, E)); 03746 break; 03747 case IndexedLane: 03748 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, 03749 LaneIndex, 03750 (Spacing == 2), 03751 S, E)); 03752 break; 03753 } 03754 return MatchOperand_Success; 03755 } 03756 03757 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 
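/// For example: "dmb ish", "dsb sy", or a raw 4-bit immediate such as
/// "dmb #7". The load-only variants ("ld", "ishld", "nshld", "oshld") are
/// only accepted on ARMv8.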
03758 ARMAsmParser::OperandMatchResultTy 03759 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) { 03760 SMLoc S = Parser.getTok().getLoc(); 03761 const AsmToken &Tok = Parser.getTok(); 03762 unsigned Opt; 03763 03764 if (Tok.is(AsmToken::Identifier)) { 03765 StringRef OptStr = Tok.getString(); 03766 03767 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower()) 03768 .Case("sy", ARM_MB::SY) 03769 .Case("st", ARM_MB::ST) 03770 .Case("ld", ARM_MB::LD) 03771 .Case("sh", ARM_MB::ISH) 03772 .Case("ish", ARM_MB::ISH) 03773 .Case("shst", ARM_MB::ISHST) 03774 .Case("ishst", ARM_MB::ISHST) 03775 .Case("ishld", ARM_MB::ISHLD) 03776 .Case("nsh", ARM_MB::NSH) 03777 .Case("un", ARM_MB::NSH) 03778 .Case("nshst", ARM_MB::NSHST) 03779 .Case("nshld", ARM_MB::NSHLD) 03780 .Case("unst", ARM_MB::NSHST) 03781 .Case("osh", ARM_MB::OSH) 03782 .Case("oshst", ARM_MB::OSHST) 03783 .Case("oshld", ARM_MB::OSHLD) 03784 .Default(~0U); 03785 03786 // ishld, oshld, nshld and ld are only available from ARMv8. 03787 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD || 03788 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD)) 03789 Opt = ~0U; 03790 03791 if (Opt == ~0U) 03792 return MatchOperand_NoMatch; 03793 03794 Parser.Lex(); // Eat identifier token. 03795 } else if (Tok.is(AsmToken::Hash) || 03796 Tok.is(AsmToken::Dollar) || 03797 Tok.is(AsmToken::Integer)) { 03798 if (Parser.getTok().isNot(AsmToken::Integer)) 03799 Parser.Lex(); // Eat '#' or '$'. 03800 SMLoc Loc = Parser.getTok().getLoc(); 03801 03802 const MCExpr *MemBarrierID; 03803 if (getParser().parseExpression(MemBarrierID)) { 03804 Error(Loc, "illegal expression"); 03805 return MatchOperand_ParseFail; 03806 } 03807 03808 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID); 03809 if (!CE) { 03810 Error(Loc, "constant expression expected"); 03811 return MatchOperand_ParseFail; 03812 } 03813 03814 int Val = CE->getValue(); 03815 if (Val & ~0xf) { 03816 Error(Loc, "immediate value out of range"); 03817 return MatchOperand_ParseFail; 03818 } 03819 03820 Opt = ARM_MB::RESERVED_0 + Val; 03821 } else 03822 return MatchOperand_ParseFail; 03823 03824 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 03825 return MatchOperand_Success; 03826 } 03827 03828 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options. 03829 ARMAsmParser::OperandMatchResultTy 03830 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) { 03831 SMLoc S = Parser.getTok().getLoc(); 03832 const AsmToken &Tok = Parser.getTok(); 03833 unsigned Opt; 03834 03835 if (Tok.is(AsmToken::Identifier)) { 03836 StringRef OptStr = Tok.getString(); 03837 03838 if (OptStr.equals_lower("sy")) 03839 Opt = ARM_ISB::SY; 03840 else 03841 return MatchOperand_NoMatch; 03842 03843 Parser.Lex(); // Eat identifier token. 03844 } else if (Tok.is(AsmToken::Hash) || 03845 Tok.is(AsmToken::Dollar) || 03846 Tok.is(AsmToken::Integer)) { 03847 if (Parser.getTok().isNot(AsmToken::Integer)) 03848 Parser.Lex(); // Eat '#' or '$'. 
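// For example, "isb sy" was handled above; an immediate form such as
// "isb #15" is parsed here, and any 4-bit value is accepted.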
03849 SMLoc Loc = Parser.getTok().getLoc(); 03850 03851 const MCExpr *ISBarrierID; 03852 if (getParser().parseExpression(ISBarrierID)) { 03853 Error(Loc, "illegal expression"); 03854 return MatchOperand_ParseFail; 03855 } 03856 03857 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID); 03858 if (!CE) { 03859 Error(Loc, "constant expression expected"); 03860 return MatchOperand_ParseFail; 03861 } 03862 03863 int Val = CE->getValue(); 03864 if (Val & ~0xf) { 03865 Error(Loc, "immediate value out of range"); 03866 return MatchOperand_ParseFail; 03867 } 03868 03869 Opt = ARM_ISB::RESERVED_0 + Val; 03870 } else 03871 return MatchOperand_ParseFail; 03872 03873 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt( 03874 (ARM_ISB::InstSyncBOpt)Opt, S)); 03875 return MatchOperand_Success; 03876 } 03877 03878 03879 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 03880 ARMAsmParser::OperandMatchResultTy 03881 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) { 03882 SMLoc S = Parser.getTok().getLoc(); 03883 const AsmToken &Tok = Parser.getTok(); 03884 if (!Tok.is(AsmToken::Identifier)) 03885 return MatchOperand_NoMatch; 03886 StringRef IFlagsStr = Tok.getString(); 03887 03888 // An iflags string of "none" is interpreted to mean that none of the AIF 03889 // bits are set. Not a terribly useful instruction, but a valid encoding. 03890 unsigned IFlags = 0; 03891 if (IFlagsStr != "none") { 03892 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 03893 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 03894 .Case("a", ARM_PROC::A) 03895 .Case("i", ARM_PROC::I) 03896 .Case("f", ARM_PROC::F) 03897 .Default(~0U); 03898 03899 // If some specific iflag is already set, it means that some letter is 03900 // present more than once, this is not acceptable. 03901 if (Flag == ~0U || (IFlags & Flag)) 03902 return MatchOperand_NoMatch; 03903 03904 IFlags |= Flag; 03905 } 03906 } 03907 03908 Parser.Lex(); // Eat identifier token. 03909 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 03910 return MatchOperand_Success; 03911 } 03912 03913 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 03914 ARMAsmParser::OperandMatchResultTy 03915 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) { 03916 SMLoc S = Parser.getTok().getLoc(); 03917 const AsmToken &Tok = Parser.getTok(); 03918 if (!Tok.is(AsmToken::Identifier)) 03919 return MatchOperand_NoMatch; 03920 StringRef Mask = Tok.getString(); 03921 03922 if (isMClass()) { 03923 // See ARMv6-M 10.1.1 03924 std::string Name = Mask.lower(); 03925 unsigned FlagsVal = StringSwitch<unsigned>(Name) 03926 // Note: in the documentation: 03927 // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias 03928 // for MSR APSR_nzcvq. 03929 // but we do make it an alias here. This is so to get the "mask encoding" 03930 // bits correct on MSR APSR writes. 03931 // 03932 // FIXME: Note the 0xc00 "mask encoding" bits version of the registers 03933 // should really only be allowed when writing a special register. Note 03934 // they get dropped in the MRS instruction reading a special register as 03935 // the SYSm field is only 8 bits. 
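// For example (M-class): "msr apsr_nzcvq, r0" maps to the 0x800 entry below,
// while "mrs r0, msp" uses the 0x808 ("msp") entry.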
03936 .Case("apsr", 0x800) 03937 .Case("apsr_nzcvq", 0x800) 03938 .Case("apsr_g", 0x400) 03939 .Case("apsr_nzcvqg", 0xc00) 03940 .Case("iapsr", 0x801) 03941 .Case("iapsr_nzcvq", 0x801) 03942 .Case("iapsr_g", 0x401) 03943 .Case("iapsr_nzcvqg", 0xc01) 03944 .Case("eapsr", 0x802) 03945 .Case("eapsr_nzcvq", 0x802) 03946 .Case("eapsr_g", 0x402) 03947 .Case("eapsr_nzcvqg", 0xc02) 03948 .Case("xpsr", 0x803) 03949 .Case("xpsr_nzcvq", 0x803) 03950 .Case("xpsr_g", 0x403) 03951 .Case("xpsr_nzcvqg", 0xc03) 03952 .Case("ipsr", 0x805) 03953 .Case("epsr", 0x806) 03954 .Case("iepsr", 0x807) 03955 .Case("msp", 0x808) 03956 .Case("psp", 0x809) 03957 .Case("primask", 0x810) 03958 .Case("basepri", 0x811) 03959 .Case("basepri_max", 0x812) 03960 .Case("faultmask", 0x813) 03961 .Case("control", 0x814) 03962 .Default(~0U); 03963 03964 if (FlagsVal == ~0U) 03965 return MatchOperand_NoMatch; 03966 03967 if (!hasThumb2DSP() && (FlagsVal & 0x400)) 03968 // The _g and _nzcvqg versions are only valid if the DSP extension is 03969 // available. 03970 return MatchOperand_NoMatch; 03971 03972 if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813) 03973 // basepri, basepri_max and faultmask only valid for V7m. 03974 return MatchOperand_NoMatch; 03975 03976 Parser.Lex(); // Eat identifier token. 03977 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 03978 return MatchOperand_Success; 03979 } 03980 03981 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 03982 size_t Start = 0, Next = Mask.find('_'); 03983 StringRef Flags = ""; 03984 std::string SpecReg = Mask.slice(Start, Next).lower(); 03985 if (Next != StringRef::npos) 03986 Flags = Mask.slice(Next+1, Mask.size()); 03987 03988 // FlagsVal contains the complete mask: 03989 // 3-0: Mask 03990 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 03991 unsigned FlagsVal = 0; 03992 03993 if (SpecReg == "apsr") { 03994 FlagsVal = StringSwitch<unsigned>(Flags) 03995 .Case("nzcvq", 0x8) // same as CPSR_f 03996 .Case("g", 0x4) // same as CPSR_s 03997 .Case("nzcvqg", 0xc) // same as CPSR_fs 03998 .Default(~0U); 03999 04000 if (FlagsVal == ~0U) { 04001 if (!Flags.empty()) 04002 return MatchOperand_NoMatch; 04003 else 04004 FlagsVal = 8; // No flag 04005 } 04006 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 04007 // cpsr_all is an alias for cpsr_fc, as is plain cpsr. 04008 if (Flags == "all" || Flags == "") 04009 Flags = "fc"; 04010 for (int i = 0, e = Flags.size(); i != e; ++i) { 04011 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 04012 .Case("c", 1) 04013 .Case("x", 2) 04014 .Case("s", 4) 04015 .Case("f", 8) 04016 .Default(~0U); 04017 04018 // If some specific flag is already set, it means that some letter is 04019 // present more than once, this is not acceptable. 04020 if (FlagsVal == ~0U || (FlagsVal & Flag)) 04021 return MatchOperand_NoMatch; 04022 FlagsVal |= Flag; 04023 } 04024 } else // No match for special register. 04025 return MatchOperand_NoMatch; 04026 04027 // Special register without flags is NOT equivalent to "fc" flags. 04028 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 04029 // two lines would enable gas compatibility at the expense of breaking 04030 // round-tripping. 04031 // 04032 // if (!FlagsVal) 04033 // FlagsVal = 0x9; 04034 04035 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 04036 if (SpecReg == "spsr") 04037 FlagsVal |= 16; 04038 04039 Parser.Lex(); // Eat identifier token. 
04040 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 04041 return MatchOperand_Success; 04042 } 04043 04044 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for 04045 /// use in the MRS/MSR instructions added to support virtualization. 04046 ARMAsmParser::OperandMatchResultTy 04047 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) { 04048 SMLoc S = Parser.getTok().getLoc(); 04049 const AsmToken &Tok = Parser.getTok(); 04050 if (!Tok.is(AsmToken::Identifier)) 04051 return MatchOperand_NoMatch; 04052 StringRef RegName = Tok.getString(); 04053 04054 // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM 04055 // and bit 5 is R. 04056 unsigned Encoding = StringSwitch<unsigned>(RegName.lower()) 04057 .Case("r8_usr", 0x00) 04058 .Case("r9_usr", 0x01) 04059 .Case("r10_usr", 0x02) 04060 .Case("r11_usr", 0x03) 04061 .Case("r12_usr", 0x04) 04062 .Case("sp_usr", 0x05) 04063 .Case("lr_usr", 0x06) 04064 .Case("r8_fiq", 0x08) 04065 .Case("r9_fiq", 0x09) 04066 .Case("r10_fiq", 0x0a) 04067 .Case("r11_fiq", 0x0b) 04068 .Case("r12_fiq", 0x0c) 04069 .Case("sp_fiq", 0x0d) 04070 .Case("lr_fiq", 0x0e) 04071 .Case("lr_irq", 0x10) 04072 .Case("sp_irq", 0x11) 04073 .Case("lr_svc", 0x12) 04074 .Case("sp_svc", 0x13) 04075 .Case("lr_abt", 0x14) 04076 .Case("sp_abt", 0x15) 04077 .Case("lr_und", 0x16) 04078 .Case("sp_und", 0x17) 04079 .Case("lr_mon", 0x1c) 04080 .Case("sp_mon", 0x1d) 04081 .Case("elr_hyp", 0x1e) 04082 .Case("sp_hyp", 0x1f) 04083 .Case("spsr_fiq", 0x2e) 04084 .Case("spsr_irq", 0x30) 04085 .Case("spsr_svc", 0x32) 04086 .Case("spsr_abt", 0x34) 04087 .Case("spsr_und", 0x36) 04088 .Case("spsr_mon", 0x3c) 04089 .Case("spsr_hyp", 0x3e) 04090 .Default(~0U); 04091 04092 if (Encoding == ~0U) 04093 return MatchOperand_NoMatch; 04094 04095 Parser.Lex(); // Eat identifier token. 04096 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S)); 04097 return MatchOperand_Success; 04098 } 04099 04100 ARMAsmParser::OperandMatchResultTy 04101 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low, 04102 int High) { 04103 const AsmToken &Tok = Parser.getTok(); 04104 if (Tok.isNot(AsmToken::Identifier)) { 04105 Error(Parser.getTok().getLoc(), Op + " operand expected."); 04106 return MatchOperand_ParseFail; 04107 } 04108 StringRef ShiftName = Tok.getString(); 04109 std::string LowerOp = Op.lower(); 04110 std::string UpperOp = Op.upper(); 04111 if (ShiftName != LowerOp && ShiftName != UpperOp) { 04112 Error(Parser.getTok().getLoc(), Op + " operand expected."); 04113 return MatchOperand_ParseFail; 04114 } 04115 Parser.Lex(); // Eat shift type token. 04116 04117 // There must be a '#' and a shift amount. 04118 if (Parser.getTok().isNot(AsmToken::Hash) && 04119 Parser.getTok().isNot(AsmToken::Dollar)) { 04120 Error(Parser.getTok().getLoc(), "'#' expected"); 04121 return MatchOperand_ParseFail; 04122 } 04123 Parser.Lex(); // Eat hash token. 
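// For example, "pkhbt r0, r1, r2, lsl #16"; the legal [Low, High] bounds for
// the amount are supplied by the caller for the specific PKH form.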
04124 04125 const MCExpr *ShiftAmount; 04126 SMLoc Loc = Parser.getTok().getLoc(); 04127 SMLoc EndLoc; 04128 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 04129 Error(Loc, "illegal expression"); 04130 return MatchOperand_ParseFail; 04131 } 04132 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 04133 if (!CE) { 04134 Error(Loc, "constant expression expected"); 04135 return MatchOperand_ParseFail; 04136 } 04137 int Val = CE->getValue(); 04138 if (Val < Low || Val > High) { 04139 Error(Loc, "immediate value out of range"); 04140 return MatchOperand_ParseFail; 04141 } 04142 04143 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc)); 04144 04145 return MatchOperand_Success; 04146 } 04147 04148 ARMAsmParser::OperandMatchResultTy 04149 ARMAsmParser::parseSetEndImm(OperandVector &Operands) { 04150 const AsmToken &Tok = Parser.getTok(); 04151 SMLoc S = Tok.getLoc(); 04152 if (Tok.isNot(AsmToken::Identifier)) { 04153 Error(S, "'be' or 'le' operand expected"); 04154 return MatchOperand_ParseFail; 04155 } 04156 int Val = StringSwitch<int>(Tok.getString().lower()) 04157 .Case("be", 1) 04158 .Case("le", 0) 04159 .Default(-1); 04160 Parser.Lex(); // Eat the token. 04161 04162 if (Val == -1) { 04163 Error(S, "'be' or 'le' operand expected"); 04164 return MatchOperand_ParseFail; 04165 } 04166 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 04167 getContext()), 04168 S, Tok.getEndLoc())); 04169 return MatchOperand_Success; 04170 } 04171 04172 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 04173 /// instructions. Legal values are: 04174 /// lsl #n 'n' in [0,31] 04175 /// asr #n 'n' in [1,32] 04176 /// n == 32 encoded as n == 0. 04177 ARMAsmParser::OperandMatchResultTy 04178 ARMAsmParser::parseShifterImm(OperandVector &Operands) { 04179 const AsmToken &Tok = Parser.getTok(); 04180 SMLoc S = Tok.getLoc(); 04181 if (Tok.isNot(AsmToken::Identifier)) { 04182 Error(S, "shift operator 'asr' or 'lsl' expected"); 04183 return MatchOperand_ParseFail; 04184 } 04185 StringRef ShiftName = Tok.getString(); 04186 bool isASR; 04187 if (ShiftName == "lsl" || ShiftName == "LSL") 04188 isASR = false; 04189 else if (ShiftName == "asr" || ShiftName == "ASR") 04190 isASR = true; 04191 else { 04192 Error(S, "shift operator 'asr' or 'lsl' expected"); 04193 return MatchOperand_ParseFail; 04194 } 04195 Parser.Lex(); // Eat the operator. 04196 04197 // A '#' and a shift amount. 04198 if (Parser.getTok().isNot(AsmToken::Hash) && 04199 Parser.getTok().isNot(AsmToken::Dollar)) { 04200 Error(Parser.getTok().getLoc(), "'#' expected"); 04201 return MatchOperand_ParseFail; 04202 } 04203 Parser.Lex(); // Eat hash token. 04204 SMLoc ExLoc = Parser.getTok().getLoc(); 04205 04206 const MCExpr *ShiftAmount; 04207 SMLoc EndLoc; 04208 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 04209 Error(ExLoc, "malformed shift expression"); 04210 return MatchOperand_ParseFail; 04211 } 04212 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 04213 if (!CE) { 04214 Error(ExLoc, "shift amount must be an immediate"); 04215 return MatchOperand_ParseFail; 04216 } 04217 04218 int64_t Val = CE->getValue(); 04219 if (isASR) { 04220 // Shift amount must be in [1,32] 04221 if (Val < 1 || Val > 32) { 04222 Error(ExLoc, "'asr' shift amount must be in range [1,32]"); 04223 return MatchOperand_ParseFail; 04224 } 04225 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
04226     if (isThumb() && Val == 32) {
04227       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
04228       return MatchOperand_ParseFail;
04229     }
04230     if (Val == 32) Val = 0;
04231   } else {
04232     // Shift amount must be in [0,31].
04233     if (Val < 0 || Val > 31) {
04234       Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
04235       return MatchOperand_ParseFail;
04236     }
04237   }
04238 
04239   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
04240 
04241   return MatchOperand_Success;
04242 }
04243 
04244 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
04245 /// of instructions. Legal values are:
04246 ///     ror #n  'n' in {0, 8, 16, 24}
04247 ARMAsmParser::OperandMatchResultTy
04248 ARMAsmParser::parseRotImm(OperandVector &Operands) {
04249   const AsmToken &Tok = Parser.getTok();
04250   SMLoc S = Tok.getLoc();
04251   if (Tok.isNot(AsmToken::Identifier))
04252     return MatchOperand_NoMatch;
04253   StringRef ShiftName = Tok.getString();
04254   if (ShiftName != "ror" && ShiftName != "ROR")
04255     return MatchOperand_NoMatch;
04256   Parser.Lex(); // Eat the operator.
04257 
04258   // A '#' and a rotate amount.
04259   if (Parser.getTok().isNot(AsmToken::Hash) &&
04260       Parser.getTok().isNot(AsmToken::Dollar)) {
04261     Error(Parser.getTok().getLoc(), "'#' expected");
04262     return MatchOperand_ParseFail;
04263   }
04264   Parser.Lex(); // Eat hash token.
04265   SMLoc ExLoc = Parser.getTok().getLoc();
04266 
04267   const MCExpr *ShiftAmount;
04268   SMLoc EndLoc;
04269   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
04270     Error(ExLoc, "malformed rotate expression");
04271     return MatchOperand_ParseFail;
04272   }
04273   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
04274   if (!CE) {
04275     Error(ExLoc, "rotate amount must be an immediate");
04276     return MatchOperand_ParseFail;
04277   }
04278 
04279   int64_t Val = CE->getValue();
04280   // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
04281   // normally, zero is represented in asm by omitting the rotate operand
04282   // entirely.
04283   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
04284     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
04285     return MatchOperand_ParseFail;
04286   }
04287 
04288   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
04289 
04290   return MatchOperand_Success;
04291 }
04292 
04293 ARMAsmParser::OperandMatchResultTy
04294 ARMAsmParser::parseBitfield(OperandVector &Operands) {
04295   SMLoc S = Parser.getTok().getLoc();
04296   // The bitfield descriptor is really two operands, the LSB and the width.
04297   if (Parser.getTok().isNot(AsmToken::Hash) &&
04298       Parser.getTok().isNot(AsmToken::Dollar)) {
04299     Error(Parser.getTok().getLoc(), "'#' expected");
04300     return MatchOperand_ParseFail;
04301   }
04302   Parser.Lex(); // Eat hash token.
04303 
04304   const MCExpr *LSBExpr;
04305   SMLoc E = Parser.getTok().getLoc();
04306   if (getParser().parseExpression(LSBExpr)) {
04307     Error(E, "malformed immediate expression");
04308     return MatchOperand_ParseFail;
04309   }
04310   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
04311   if (!CE) {
04312     Error(E, "'lsb' operand must be an immediate");
04313     return MatchOperand_ParseFail;
04314   }
04315 
04316   int64_t LSB = CE->getValue();
04317   // The LSB must be in the range [0,31]
04318   if (LSB < 0 || LSB > 31) {
04319     Error(E, "'lsb' operand must be in the range [0,31]");
04320     return MatchOperand_ParseFail;
04321   }
04322   E = Parser.getTok().getLoc();
04323 
04324   // Expect another immediate operand.
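// For example, "bfi r0, r1, #8, #4": the lsb (8) was parsed above and the
// width (4) follows after the comma.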
04325   if (Parser.getTok().isNot(AsmToken::Comma)) {
04326     Error(Parser.getTok().getLoc(), "too few operands");
04327     return MatchOperand_ParseFail;
04328   }
04329   Parser.Lex(); // Eat comma token.
04330   if (Parser.getTok().isNot(AsmToken::Hash) &&
04331       Parser.getTok().isNot(AsmToken::Dollar)) {
04332     Error(Parser.getTok().getLoc(), "'#' expected");
04333     return MatchOperand_ParseFail;
04334   }
04335   Parser.Lex(); // Eat hash token.
04336 
04337   const MCExpr *WidthExpr;
04338   SMLoc EndLoc;
04339   if (getParser().parseExpression(WidthExpr, EndLoc)) {
04340     Error(E, "malformed immediate expression");
04341     return MatchOperand_ParseFail;
04342   }
04343   CE = dyn_cast<MCConstantExpr>(WidthExpr);
04344   if (!CE) {
04345     Error(E, "'width' operand must be an immediate");
04346     return MatchOperand_ParseFail;
04347   }
04348 
04349   int64_t Width = CE->getValue();
04350   // The width must be in the range [1,32-lsb]
04351   if (Width < 1 || Width > 32 - LSB) {
04352     Error(E, "'width' operand must be in the range [1,32-lsb]");
04353     return MatchOperand_ParseFail;
04354   }
04355 
04356   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
04357 
04358   return MatchOperand_Success;
04359 }
04360 
04361 ARMAsmParser::OperandMatchResultTy
04362 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
04363   // Check for a post-index addressing register operand. Specifically:
04364   //   postidx_reg := '+' register {, shift}
04365   //               |  '-' register {, shift}
04366   //               |  register {, shift}
04367 
04368   // This method must return MatchOperand_NoMatch without consuming any tokens
04369   // in the case where there is no match, as other alternatives take other
04370   // parse methods.
04371   AsmToken Tok = Parser.getTok();
04372   SMLoc S = Tok.getLoc();
04373   bool haveEaten = false;
04374   bool isAdd = true;
04375   if (Tok.is(AsmToken::Plus)) {
04376     Parser.Lex(); // Eat the '+' token.
04377     haveEaten = true;
04378   } else if (Tok.is(AsmToken::Minus)) {
04379     Parser.Lex(); // Eat the '-' token.
04380     isAdd = false;
04381     haveEaten = true;
04382   }
04383 
04384   SMLoc E = Parser.getTok().getEndLoc();
04385   int Reg = tryParseRegister();
04386   if (Reg == -1) {
04387     if (!haveEaten)
04388       return MatchOperand_NoMatch;
04389     Error(Parser.getTok().getLoc(), "register expected");
04390     return MatchOperand_ParseFail;
04391   }
04392 
04393   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
04394   unsigned ShiftImm = 0;
04395   if (Parser.getTok().is(AsmToken::Comma)) {
04396     Parser.Lex(); // Eat the ','.
04397     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
04398       return MatchOperand_ParseFail;
04399 
04400     // FIXME: Only approximates end...may include intervening whitespace.
04401     E = Parser.getTok().getLoc();
04402   }
04403 
04404   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
04405                                                   ShiftImm, S, E));
04406 
04407   return MatchOperand_Success;
04408 }
04409 
04410 ARMAsmParser::OperandMatchResultTy
04411 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
04412   // Check for a post-index addressing operand. Specifically:
04413   //   am3offset := '+' register
04414   //             |  '-' register
04415   //             |  register
04416   //             |  # imm
04417   //             |  # + imm
04418   //             |  # - imm
04419 
04420   // This method must return MatchOperand_NoMatch without consuming any tokens
04421   // in the case where there is no match, as other alternatives take other
04422   // parse methods.
04423   AsmToken Tok = Parser.getTok();
04424   SMLoc S = Tok.getLoc();
04425 
04426   // Do immediates first, as we always parse those if we have a '#'.
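// For example, "ldrd r0, r1, [r2], #8" takes the immediate path here, while
// "strh r0, [r1], r2" falls through to the register form below.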
04427 if (Parser.getTok().is(AsmToken::Hash) || 04428 Parser.getTok().is(AsmToken::Dollar)) { 04429 Parser.Lex(); // Eat '#' or '$'. 04430 // Explicitly look for a '-', as we need to encode negative zero 04431 // differently. 04432 bool isNegative = Parser.getTok().is(AsmToken::Minus); 04433 const MCExpr *Offset; 04434 SMLoc E; 04435 if (getParser().parseExpression(Offset, E)) 04436 return MatchOperand_ParseFail; 04437 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 04438 if (!CE) { 04439 Error(S, "constant expression expected"); 04440 return MatchOperand_ParseFail; 04441 } 04442 // Negative zero is encoded as the flag value INT32_MIN. 04443 int32_t Val = CE->getValue(); 04444 if (isNegative && Val == 0) 04445 Val = INT32_MIN; 04446 04447 Operands.push_back( 04448 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 04449 04450 return MatchOperand_Success; 04451 } 04452 04453 04454 bool haveEaten = false; 04455 bool isAdd = true; 04456 if (Tok.is(AsmToken::Plus)) { 04457 Parser.Lex(); // Eat the '+' token. 04458 haveEaten = true; 04459 } else if (Tok.is(AsmToken::Minus)) { 04460 Parser.Lex(); // Eat the '-' token. 04461 isAdd = false; 04462 haveEaten = true; 04463 } 04464 04465 Tok = Parser.getTok(); 04466 int Reg = tryParseRegister(); 04467 if (Reg == -1) { 04468 if (!haveEaten) 04469 return MatchOperand_NoMatch; 04470 Error(Tok.getLoc(), "register expected"); 04471 return MatchOperand_ParseFail; 04472 } 04473 04474 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 04475 0, S, Tok.getEndLoc())); 04476 04477 return MatchOperand_Success; 04478 } 04479 04480 /// Convert parsed operands to MCInst. Needed here because this instruction 04481 /// only has two register operands, but multiplication is commutative so 04482 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN". 04483 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst, 04484 const OperandVector &Operands) { 04485 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); 04486 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1); 04487 // If we have a three-operand form, make sure to set Rn to be the operand 04488 // that isn't the same as Rd. 04489 unsigned RegOp = 4; 04490 if (Operands.size() == 6 && 04491 ((ARMOperand &)*Operands[4]).getReg() == 04492 ((ARMOperand &)*Operands[3]).getReg()) 04493 RegOp = 5; 04494 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1); 04495 Inst.addOperand(Inst.getOperand(0)); 04496 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2); 04497 } 04498 04499 void ARMAsmParser::cvtThumbBranches(MCInst &Inst, 04500 const OperandVector &Operands) { 04501 int CondOp = -1, ImmOp = -1; 04502 switch(Inst.getOpcode()) { 04503 case ARM::tB: 04504 case ARM::tBcc: CondOp = 1; ImmOp = 2; break; 04505 04506 case ARM::t2B: 04507 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break; 04508 04509 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches"); 04510 } 04511 // first decide whether or not the branch should be conditional 04512 // by looking at it's location relative to an IT block 04513 if(inITBlock()) { 04514 // inside an IT block we cannot have any conditional branches. 
any 04515 // such instructions needs to be converted to unconditional form 04516 switch(Inst.getOpcode()) { 04517 case ARM::tBcc: Inst.setOpcode(ARM::tB); break; 04518 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break; 04519 } 04520 } else { 04521 // outside IT blocks we can only have unconditional branches with AL 04522 // condition code or conditional branches with non-AL condition code 04523 unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode(); 04524 switch(Inst.getOpcode()) { 04525 case ARM::tB: 04526 case ARM::tBcc: 04527 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc); 04528 break; 04529 case ARM::t2B: 04530 case ARM::t2Bcc: 04531 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc); 04532 break; 04533 } 04534 } 04535 04536 // now decide on encoding size based on branch target range 04537 switch(Inst.getOpcode()) { 04538 // classify tB as either t2B or t1B based on range of immediate operand 04539 case ARM::tB: { 04540 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]); 04541 if (!op.isSignedOffset<11, 1>() && isThumbTwo()) 04542 Inst.setOpcode(ARM::t2B); 04543 break; 04544 } 04545 // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand 04546 case ARM::tBcc: { 04547 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]); 04548 if (!op.isSignedOffset<8, 1>() && isThumbTwo()) 04549 Inst.setOpcode(ARM::t2Bcc); 04550 break; 04551 } 04552 } 04553 ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1); 04554 ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2); 04555 } 04556 04557 /// Parse an ARM memory expression, return false if successful else return true 04558 /// or an error. The first token must be a '[' when called. 04559 bool ARMAsmParser::parseMemory(OperandVector &Operands) { 04560 SMLoc S, E; 04561 assert(Parser.getTok().is(AsmToken::LBrac) && 04562 "Token is not a Left Bracket"); 04563 S = Parser.getTok().getLoc(); 04564 Parser.Lex(); // Eat left bracket token. 04565 04566 const AsmToken &BaseRegTok = Parser.getTok(); 04567 int BaseRegNum = tryParseRegister(); 04568 if (BaseRegNum == -1) 04569 return Error(BaseRegTok.getLoc(), "register expected"); 04570 04571 // The next token must either be a comma, a colon or a closing bracket. 04572 const AsmToken &Tok = Parser.getTok(); 04573 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) && 04574 !Tok.is(AsmToken::RBrac)) 04575 return Error(Tok.getLoc(), "malformed memory operand"); 04576 04577 if (Tok.is(AsmToken::RBrac)) { 04578 E = Tok.getEndLoc(); 04579 Parser.Lex(); // Eat right bracket token. 04580 04581 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0, 04582 ARM_AM::no_shift, 0, 0, false, 04583 S, E)); 04584 04585 // If there's a pre-indexing writeback marker, '!', just add it as a token 04586 // operand. It's rather odd, but syntactically valid. 04587 if (Parser.getTok().is(AsmToken::Exclaim)) { 04588 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 04589 Parser.Lex(); // Eat the '!'. 04590 } 04591 04592 return false; 04593 } 04594 04595 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) && 04596 "Lost colon or comma in memory operand?!"); 04597 if (Tok.is(AsmToken::Comma)) { 04598 Parser.Lex(); // Eat the comma. 04599 } 04600 04601 // If we have a ':', it's an alignment specifier. 04602 if (Parser.getTok().is(AsmToken::Colon)) { 04603 Parser.Lex(); // Eat the ':'. 
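// For example, "vld1.64 {d0, d1}, [r0:128]" specifies 128-bit alignment here;
// the value is converted to bytes below.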
04604 E = Parser.getTok().getLoc(); 04605 SMLoc AlignmentLoc = Tok.getLoc(); 04606 04607 const MCExpr *Expr; 04608 if (getParser().parseExpression(Expr)) 04609 return true; 04610 04611 // The expression has to be a constant. Memory references with relocations 04612 // don't come through here, as they use the <label> forms of the relevant 04613 // instructions. 04614 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 04615 if (!CE) 04616 return Error (E, "constant expression expected"); 04617 04618 unsigned Align = 0; 04619 switch (CE->getValue()) { 04620 default: 04621 return Error(E, 04622 "alignment specifier must be 16, 32, 64, 128, or 256 bits"); 04623 case 16: Align = 2; break; 04624 case 32: Align = 4; break; 04625 case 64: Align = 8; break; 04626 case 128: Align = 16; break; 04627 case 256: Align = 32; break; 04628 } 04629 04630 // Now we should have the closing ']' 04631 if (Parser.getTok().isNot(AsmToken::RBrac)) 04632 return Error(Parser.getTok().getLoc(), "']' expected"); 04633 E = Parser.getTok().getEndLoc(); 04634 Parser.Lex(); // Eat right bracket token. 04635 04636 // Don't worry about range checking the value here. That's handled by 04637 // the is*() predicates. 04638 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0, 04639 ARM_AM::no_shift, 0, Align, 04640 false, S, E, AlignmentLoc)); 04641 04642 // If there's a pre-indexing writeback marker, '!', just add it as a token 04643 // operand. 04644 if (Parser.getTok().is(AsmToken::Exclaim)) { 04645 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 04646 Parser.Lex(); // Eat the '!'. 04647 } 04648 04649 return false; 04650 } 04651 04652 // If we have a '#', it's an immediate offset, else assume it's a register 04653 // offset. Be friendly and also accept a plain integer (without a leading 04654 // hash) for gas compatibility. 04655 if (Parser.getTok().is(AsmToken::Hash) || 04656 Parser.getTok().is(AsmToken::Dollar) || 04657 Parser.getTok().is(AsmToken::Integer)) { 04658 if (Parser.getTok().isNot(AsmToken::Integer)) 04659 Parser.Lex(); // Eat '#' or '$'. 04660 E = Parser.getTok().getLoc(); 04661 04662 bool isNegative = getParser().getTok().is(AsmToken::Minus); 04663 const MCExpr *Offset; 04664 if (getParser().parseExpression(Offset)) 04665 return true; 04666 04667 // The expression has to be a constant. Memory references with relocations 04668 // don't come through here, as they use the <label> forms of the relevant 04669 // instructions. 04670 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 04671 if (!CE) 04672 return Error (E, "constant expression expected"); 04673 04674 // If the constant was #-0, represent it as INT32_MIN. 04675 int32_t Val = CE->getValue(); 04676 if (isNegative && Val == 0) 04677 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 04678 04679 // Now we should have the closing ']' 04680 if (Parser.getTok().isNot(AsmToken::RBrac)) 04681 return Error(Parser.getTok().getLoc(), "']' expected"); 04682 E = Parser.getTok().getEndLoc(); 04683 Parser.Lex(); // Eat right bracket token. 04684 04685 // Don't worry about range checking the value here. That's handled by 04686 // the is*() predicates. 04687 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 04688 ARM_AM::no_shift, 0, 0, 04689 false, S, E)); 04690 04691 // If there's a pre-indexing writeback marker, '!', just add it as a token 04692 // operand. 
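// For example, "ldr r0, [r1, #4]!" writes the updated address back to r1.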
04693 if (Parser.getTok().is(AsmToken::Exclaim)) {
04694 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
04695 Parser.Lex(); // Eat the '!'.
04696 }
04697 
04698 return false;
04699 }
04700 
04701 // The register offset is optionally preceded by a '+' or '-'
04702 bool isNegative = false;
04703 if (Parser.getTok().is(AsmToken::Minus)) {
04704 isNegative = true;
04705 Parser.Lex(); // Eat the '-'.
04706 } else if (Parser.getTok().is(AsmToken::Plus)) {
04707 // Nothing to do.
04708 Parser.Lex(); // Eat the '+'.
04709 }
04710 
04711 E = Parser.getTok().getLoc();
04712 int OffsetRegNum = tryParseRegister();
04713 if (OffsetRegNum == -1)
04714 return Error(E, "register expected");
04715 
04716 // If there's a shift operator, handle it.
04717 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
04718 unsigned ShiftImm = 0;
04719 if (Parser.getTok().is(AsmToken::Comma)) {
04720 Parser.Lex(); // Eat the ','.
04721 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
04722 return true;
04723 }
04724 
04725 // Now we should have the closing ']'
04726 if (Parser.getTok().isNot(AsmToken::RBrac))
04727 return Error(Parser.getTok().getLoc(), "']' expected");
04728 E = Parser.getTok().getEndLoc();
04729 Parser.Lex(); // Eat right bracket token.
04730 
04731 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
04732 ShiftType, ShiftImm, 0, isNegative,
04733 S, E));
04734 
04735 // If there's a pre-indexing writeback marker, '!', just add it as a token
04736 // operand.
04737 if (Parser.getTok().is(AsmToken::Exclaim)) {
04738 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
04739 Parser.Lex(); // Eat the '!'.
04740 }
04741 
04742 return false;
04743 }
04744 
04745 /// parseMemRegOffsetShift - Parse one of the following two forms:
04746 /// ( lsl | lsr | asr | ror ) , # shift_amount
04747 /// rrx
04748 /// Returns false if a shift was parsed successfully, true on error.
04749 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
04750 unsigned &Amount) {
04751 SMLoc Loc = Parser.getTok().getLoc();
04752 const AsmToken &Tok = Parser.getTok();
04753 if (Tok.isNot(AsmToken::Identifier))
04754 return true;
04755 StringRef ShiftName = Tok.getString();
04756 if (ShiftName == "lsl" || ShiftName == "LSL" ||
04757 ShiftName == "asl" || ShiftName == "ASL")
04758 St = ARM_AM::lsl;
04759 else if (ShiftName == "lsr" || ShiftName == "LSR")
04760 St = ARM_AM::lsr;
04761 else if (ShiftName == "asr" || ShiftName == "ASR")
04762 St = ARM_AM::asr;
04763 else if (ShiftName == "ror" || ShiftName == "ROR")
04764 St = ARM_AM::ror;
04765 else if (ShiftName == "rrx" || ShiftName == "RRX")
04766 St = ARM_AM::rrx;
04767 else
04768 return Error(Loc, "illegal shift operator");
04769 Parser.Lex(); // Eat shift type token.
04770 
04771 // rrx stands alone.
04772 Amount = 0;
04773 if (St != ARM_AM::rrx) {
04774 Loc = Parser.getTok().getLoc();
04775 // A '#' and a shift amount.
04776 const AsmToken &HashTok = Parser.getTok();
04777 if (HashTok.isNot(AsmToken::Hash) &&
04778 HashTok.isNot(AsmToken::Dollar))
04779 return Error(HashTok.getLoc(), "'#' expected");
04780 Parser.Lex(); // Eat hash token.
04781 
04782 const MCExpr *Expr;
04783 if (getParser().parseExpression(Expr))
04784 return true;
04785 // Range check the immediate.
04786 // lsl, ror: 0 <= imm <= 31 04787 // lsr, asr: 0 <= imm <= 32 04788 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 04789 if (!CE) 04790 return Error(Loc, "shift amount must be an immediate"); 04791 int64_t Imm = CE->getValue(); 04792 if (Imm < 0 || 04793 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 04794 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 04795 return Error(Loc, "immediate shift value out of range"); 04796 // If <ShiftTy> #0, turn it into a no_shift. 04797 if (Imm == 0) 04798 St = ARM_AM::lsl; 04799 // For consistency, treat lsr #32 and asr #32 as having immediate value 0. 04800 if (Imm == 32) 04801 Imm = 0; 04802 Amount = Imm; 04803 } 04804 04805 return false; 04806 } 04807 04808 /// parseFPImm - A floating point immediate expression operand. 04809 ARMAsmParser::OperandMatchResultTy 04810 ARMAsmParser::parseFPImm(OperandVector &Operands) { 04811 // Anything that can accept a floating point constant as an operand 04812 // needs to go through here, as the regular parseExpression is 04813 // integer only. 04814 // 04815 // This routine still creates a generic Immediate operand, containing 04816 // a bitcast of the 64-bit floating point value. The various operands 04817 // that accept floats can check whether the value is valid for them 04818 // via the standard is*() predicates. 04819 04820 SMLoc S = Parser.getTok().getLoc(); 04821 04822 if (Parser.getTok().isNot(AsmToken::Hash) && 04823 Parser.getTok().isNot(AsmToken::Dollar)) 04824 return MatchOperand_NoMatch; 04825 04826 // Disambiguate the VMOV forms that can accept an FP immediate. 04827 // vmov.f32 <sreg>, #imm 04828 // vmov.f64 <dreg>, #imm 04829 // vmov.f32 <dreg>, #imm @ vector f32x2 04830 // vmov.f32 <qreg>, #imm @ vector f32x4 04831 // 04832 // There are also the NEON VMOV instructions which expect an 04833 // integer constant. Make sure we don't try to parse an FPImm 04834 // for these: 04835 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 04836 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]); 04837 bool isVmovf = TyOp.isToken() && 04838 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64"); 04839 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]); 04840 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" || 04841 Mnemonic.getToken() == "fconsts"); 04842 if (!(isVmovf || isFconst)) 04843 return MatchOperand_NoMatch; 04844 04845 Parser.Lex(); // Eat '#' or '$'. 04846 04847 // Handle negation, as that still comes through as a separate token. 04848 bool isNegative = false; 04849 if (Parser.getTok().is(AsmToken::Minus)) { 04850 isNegative = true; 04851 Parser.Lex(); 04852 } 04853 const AsmToken &Tok = Parser.getTok(); 04854 SMLoc Loc = Tok.getLoc(); 04855 if (Tok.is(AsmToken::Real) && isVmovf) { 04856 APFloat RealVal(APFloat::IEEEsingle, Tok.getString()); 04857 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 04858 // If we had a '-' in front, toggle the sign bit. 04859 IntVal ^= (uint64_t)isNegative << 31; 04860 Parser.Lex(); // Eat the token. 04861 Operands.push_back(ARMOperand::CreateImm( 04862 MCConstantExpr::Create(IntVal, getContext()), 04863 S, Parser.getTok().getLoc())); 04864 return MatchOperand_Success; 04865 } 04866 // Also handle plain integers. Instructions which allow floating point 04867 // immediates also allow a raw encoded 8-bit value. 04868 if (Tok.is(AsmToken::Integer) && isFconst) { 04869 int64_t Val = Tok.getIntVal(); 04870 Parser.Lex(); // Eat the token. 
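// Illustrative examples (assumed syntax, not from the original source) of the
// raw 8-bit encoded form accepted for the fconst aliases; the value is range
// checked below and expanded to the equivalent floating point bit pattern:
//   fconsts s0, #112
//   fconstd d1, #255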
04871 if (Val > 255 || Val < 0) {
04872 Error(Loc, "encoded floating point value out of range");
04873 return MatchOperand_ParseFail;
04874 }
04875 float RealVal = ARM_AM::getFPImmFloat(Val);
04876 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
04877 
04878 Operands.push_back(ARMOperand::CreateImm(
04879 MCConstantExpr::Create(Val, getContext()), S,
04880 Parser.getTok().getLoc()));
04881 return MatchOperand_Success;
04882 }
04883 
04884 Error(Loc, "invalid floating point immediate");
04885 return MatchOperand_ParseFail;
04886 }
04887 
04888 /// Parse an ARM instruction operand. For now this parses the operand regardless
04889 /// of the mnemonic.
04890 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
04891 SMLoc S, E;
04892 
04893 // Check if the current operand has a custom associated parser. If so, try to
04894 // custom parse the operand; otherwise fall back to the general approach.
04895 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
04896 if (ResTy == MatchOperand_Success)
04897 return false;
04898 // If there wasn't a custom match, try the generic matcher below. Otherwise,
04899 // there was a match, but an error occurred, in which case, just return that
04900 // the operand parsing failed.
04901 if (ResTy == MatchOperand_ParseFail)
04902 return true;
04903 
04904 switch (getLexer().getKind()) {
04905 default:
04906 Error(Parser.getTok().getLoc(), "unexpected token in operand");
04907 return true;
04908 case AsmToken::Identifier: {
04909 // If we've seen a branch mnemonic, the next operand must be a label. This
04910 // is true even if the label is a register name. So "b r1" means branch to
04911 // label "r1".
04912 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
04913 if (!ExpectLabel) {
04914 if (!tryParseRegisterWithWriteBack(Operands))
04915 return false;
04916 int Res = tryParseShiftRegister(Operands);
04917 if (Res == 0) // success
04918 return false;
04919 else if (Res == -1) // irrecoverable error
04920 return true;
04921 // If this is VMRS, check for the apsr_nzcv operand.
04922 if (Mnemonic == "vmrs" &&
04923 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
04924 S = Parser.getTok().getLoc();
04925 Parser.Lex();
04926 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
04927 return false;
04928 }
04929 }
04930 
04931 // Fall through for the Identifier case that is not a register or a
04932 // special name.
04933 }
04934 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
04935 case AsmToken::Integer: // things like 1f and 2b as branch targets
04936 case AsmToken::String: // quoted label names.
04937 case AsmToken::Dot: { // . as a branch target
04938 // This was not a register so parse other operands that start with an
04939 // identifier (like labels) as expressions and create them as immediates.
04940 const MCExpr *IdVal;
04941 S = Parser.getTok().getLoc();
04942 if (getParser().parseExpression(IdVal))
04943 return true;
04944 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
04945 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
04946 return false;
04947 }
04948 case AsmToken::LBrac:
04949 return parseMemory(Operands);
04950 case AsmToken::LCurly:
04951 return parseRegisterList(Operands);
04952 case AsmToken::Dollar:
04953 case AsmToken::Hash: {
04954 // #42 -> immediate.
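// Illustrative examples (assumed syntax, not from the original source):
//   mov r0, #42   @ '#'-prefixed immediate
//   mov r0, $42   @ '$' accepted for GNU as compatibility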
04955 S = Parser.getTok().getLoc(); 04956 Parser.Lex(); 04957 04958 if (Parser.getTok().isNot(AsmToken::Colon)) { 04959 bool isNegative = Parser.getTok().is(AsmToken::Minus); 04960 const MCExpr *ImmVal; 04961 if (getParser().parseExpression(ImmVal)) 04962 return true; 04963 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 04964 if (CE) { 04965 int32_t Val = CE->getValue(); 04966 if (isNegative && Val == 0) 04967 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 04968 } 04969 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 04970 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 04971 04972 // There can be a trailing '!' on operands that we want as a separate 04973 // '!' Token operand. Handle that here. For example, the compatibility 04974 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'. 04975 if (Parser.getTok().is(AsmToken::Exclaim)) { 04976 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(), 04977 Parser.getTok().getLoc())); 04978 Parser.Lex(); // Eat exclaim token 04979 } 04980 return false; 04981 } 04982 // w/ a ':' after the '#', it's just like a plain ':'. 04983 // FALLTHROUGH 04984 } 04985 case AsmToken::Colon: { 04986 // ":lower16:" and ":upper16:" expression prefixes 04987 // FIXME: Check it's an expression prefix, 04988 // e.g. (FOO - :lower16:BAR) isn't legal. 04989 ARMMCExpr::VariantKind RefKind; 04990 if (parsePrefix(RefKind)) 04991 return true; 04992 04993 const MCExpr *SubExprVal; 04994 if (getParser().parseExpression(SubExprVal)) 04995 return true; 04996 04997 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 04998 getContext()); 04999 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 05000 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 05001 return false; 05002 } 05003 case AsmToken::Equal: { 05004 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val) 05005 return Error(Parser.getTok().getLoc(), "unexpected token in operand"); 05006 05007 Parser.Lex(); // Eat '=' 05008 const MCExpr *SubExprVal; 05009 if (getParser().parseExpression(SubExprVal)) 05010 return true; 05011 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 05012 05013 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal); 05014 Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E)); 05015 return false; 05016 } 05017 } 05018 } 05019 05020 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 05021 // :lower16: and :upper16:. 
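// Illustrative examples (assumed syntax, not from the original source) of the
// prefixes recognized by parsePrefix():
//   movw r0, #:lower16:sym   @ RefKind = VK_ARM_LO16
//   movt r0, #:upper16:sym   @ RefKind = VK_ARM_HI16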
05022 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 05023 RefKind = ARMMCExpr::VK_ARM_None; 05024 05025 // consume an optional '#' (GNU compatibility) 05026 if (getLexer().is(AsmToken::Hash)) 05027 Parser.Lex(); 05028 05029 // :lower16: and :upper16: modifiers 05030 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 05031 Parser.Lex(); // Eat ':' 05032 05033 if (getLexer().isNot(AsmToken::Identifier)) { 05034 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 05035 return true; 05036 } 05037 05038 StringRef IDVal = Parser.getTok().getIdentifier(); 05039 if (IDVal == "lower16") { 05040 RefKind = ARMMCExpr::VK_ARM_LO16; 05041 } else if (IDVal == "upper16") { 05042 RefKind = ARMMCExpr::VK_ARM_HI16; 05043 } else { 05044 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 05045 return true; 05046 } 05047 Parser.Lex(); 05048 05049 if (getLexer().isNot(AsmToken::Colon)) { 05050 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 05051 return true; 05052 } 05053 Parser.Lex(); // Eat the last ':' 05054 return false; 05055 } 05056 05057 /// \brief Given a mnemonic, split out possible predication code and carry 05058 /// setting letters to form a canonical mnemonic and flags. 05059 // 05060 // FIXME: Would be nice to autogen this. 05061 // FIXME: This is a bit of a maze of special cases. 05062 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, 05063 unsigned &PredicationCode, 05064 bool &CarrySetting, 05065 unsigned &ProcessorIMod, 05066 StringRef &ITMask) { 05067 PredicationCode = ARMCC::AL; 05068 CarrySetting = false; 05069 ProcessorIMod = 0; 05070 05071 // Ignore some mnemonics we know aren't predicated forms. 05072 // 05073 // FIXME: Would be nice to autogen this. 05074 if ((Mnemonic == "movs" && isThumb()) || 05075 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 05076 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 05077 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 05078 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 05079 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" || 05080 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 05081 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 05082 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || 05083 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || 05084 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || 05085 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" || 05086 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel")) 05087 return Mnemonic; 05088 05089 // First, split out any predication code. Ignore mnemonics we know aren't 05090 // predicated but do have a carry-set and so weren't caught above. 
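// Illustrative sketch of the splitting performed below (examples assumed):
//   "addeq"  -> Mnemonic "add", PredicationCode = ARMCC::EQ
//   "addseq" -> "adds" here; the trailing 's' is then split off as
//               CarrySetting further down
//   "teq"    -> left untouched by the early-out list above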
05091 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 05092 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 05093 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 05094 Mnemonic != "sbcs" && Mnemonic != "rscs") { 05095 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 05096 .Case("eq", ARMCC::EQ) 05097 .Case("ne", ARMCC::NE) 05098 .Case("hs", ARMCC::HS) 05099 .Case("cs", ARMCC::HS) 05100 .Case("lo", ARMCC::LO) 05101 .Case("cc", ARMCC::LO) 05102 .Case("mi", ARMCC::MI) 05103 .Case("pl", ARMCC::PL) 05104 .Case("vs", ARMCC::VS) 05105 .Case("vc", ARMCC::VC) 05106 .Case("hi", ARMCC::HI) 05107 .Case("ls", ARMCC::LS) 05108 .Case("ge", ARMCC::GE) 05109 .Case("lt", ARMCC::LT) 05110 .Case("gt", ARMCC::GT) 05111 .Case("le", ARMCC::LE) 05112 .Case("al", ARMCC::AL) 05113 .Default(~0U); 05114 if (CC != ~0U) { 05115 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 05116 PredicationCode = CC; 05117 } 05118 } 05119 05120 // Next, determine if we have a carry setting bit. We explicitly ignore all 05121 // the instructions we know end in 's'. 05122 if (Mnemonic.endswith("s") && 05123 !(Mnemonic == "cps" || Mnemonic == "mls" || 05124 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 05125 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 05126 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 05127 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" || 05128 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" || 05129 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" || 05130 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || 05131 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" || 05132 (Mnemonic == "movs" && isThumb()))) { 05133 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 05134 CarrySetting = true; 05135 } 05136 05137 // The "cps" instruction can have a interrupt mode operand which is glued into 05138 // the mnemonic. Check if this is the case, split it and parse the imod op 05139 if (Mnemonic.startswith("cps")) { 05140 // Split out any imod code. 05141 unsigned IMod = 05142 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 05143 .Case("ie", ARM_PROC::IE) 05144 .Case("id", ARM_PROC::ID) 05145 .Default(~0U); 05146 if (IMod != ~0U) { 05147 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 05148 ProcessorIMod = IMod; 05149 } 05150 } 05151 05152 // The "it" instruction has the condition mask on the end of the mnemonic. 05153 if (Mnemonic.startswith("it")) { 05154 ITMask = Mnemonic.slice(2, Mnemonic.size()); 05155 Mnemonic = Mnemonic.slice(0, 2); 05156 } 05157 05158 return Mnemonic; 05159 } 05160 05161 /// \brief Given a canonical mnemonic, determine if the instruction ever allows 05162 /// inclusion of carry set or predication code operands. 05163 // 05164 // FIXME: It would be nice to autogen this. 
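// Illustrative behaviour (derived from the lists in the function below, shown
// here only as an example): "add" reports CanAcceptCarrySet = true, while
// "bkpt" reports CanAcceptPredicationCode = false.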
05165 void ARMAsmParser:: 05166 getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, 05167 bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) { 05168 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 05169 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 05170 Mnemonic == "add" || Mnemonic == "adc" || 05171 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 05172 Mnemonic == "orr" || Mnemonic == "mvn" || 05173 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 05174 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 05175 Mnemonic == "vfm" || Mnemonic == "vfnm" || 05176 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 05177 Mnemonic == "mla" || Mnemonic == "smlal" || 05178 Mnemonic == "umlal" || Mnemonic == "umull"))) { 05179 CanAcceptCarrySet = true; 05180 } else 05181 CanAcceptCarrySet = false; 05182 05183 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" || 05184 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" || 05185 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" || 05186 Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") || 05187 Mnemonic.startswith("vsel") || 05188 Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" || 05189 Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || 05190 Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" || 05191 Mnemonic == "vrintm" || Mnemonic.startswith("aes") || 05192 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") || 05193 (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) { 05194 // These mnemonics are never predicable 05195 CanAcceptPredicationCode = false; 05196 } else if (!isThumb()) { 05197 // Some instructions are only predicable in Thumb mode 05198 CanAcceptPredicationCode 05199 = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" && 05200 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" && 05201 Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" && 05202 Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" && 05203 Mnemonic != "ldc2" && Mnemonic != "ldc2l" && 05204 Mnemonic != "stc2" && Mnemonic != "stc2l" && 05205 !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs"); 05206 } else if (isThumbOne()) { 05207 if (hasV6MOps()) 05208 CanAcceptPredicationCode = Mnemonic != "movs"; 05209 else 05210 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs"; 05211 } else 05212 CanAcceptPredicationCode = true; 05213 } 05214 05215 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 05216 OperandVector &Operands) { 05217 // FIXME: This is all horribly hacky. We really need a better way to deal 05218 // with optional operands like this in the matcher table. 05219 05220 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 05221 // another does not. Specifically, the MOVW instruction does not. So we 05222 // special case it here and remove the defaulted (non-setting) cc_out 05223 // operand if that's the instruction we're trying to match. 05224 // 05225 // We do this as post-processing of the explicit operands rather than just 05226 // conditionally adding the cc_out in the first place because we need 05227 // to check the type of the parsed immediate operand. 
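// Illustrative example (assumed, not from the original source): in ARM mode
// "mov r0, #0x1234" is not a valid so_imm but does fit imm0_65535, so it can
// only match the MOVW encoding and the defaulted cc_out operand is dropped.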
05228 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 05229 !static_cast<ARMOperand &>(*Operands[4]).isARMSOImm() && 05230 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() && 05231 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0) 05232 return true; 05233 05234 // Register-register 'add' for thumb does not have a cc_out operand 05235 // when there are only two register operands. 05236 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 05237 static_cast<ARMOperand &>(*Operands[3]).isReg() && 05238 static_cast<ARMOperand &>(*Operands[4]).isReg() && 05239 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0) 05240 return true; 05241 // Register-register 'add' for thumb does not have a cc_out operand 05242 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 05243 // have to check the immediate range here since Thumb2 has a variant 05244 // that can handle a different range and has a cc_out operand. 05245 if (((isThumb() && Mnemonic == "add") || 05246 (isThumbTwo() && Mnemonic == "sub")) && 05247 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() && 05248 static_cast<ARMOperand &>(*Operands[4]).isReg() && 05249 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP && 05250 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && 05251 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) || 05252 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4())) 05253 return true; 05254 // For Thumb2, add/sub immediate does not have a cc_out operand for the 05255 // imm0_4095 variant. That's the least-preferred variant when 05256 // selecting via the generic "add" mnemonic, so to know that we 05257 // should remove the cc_out operand, we have to explicitly check that 05258 // it's not one of the other variants. Ugh. 05259 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 05260 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() && 05261 static_cast<ARMOperand &>(*Operands[4]).isReg() && 05262 static_cast<ARMOperand &>(*Operands[5]).isImm()) { 05263 // Nest conditions rather than one big 'if' statement for readability. 05264 // 05265 // If both registers are low, we're in an IT block, and the immediate is 05266 // in range, we should use encoding T1 instead, which has a cc_out. 05267 if (inITBlock() && 05268 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) && 05269 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) && 05270 static_cast<ARMOperand &>(*Operands[5]).isImm0_7()) 05271 return false; 05272 // Check against T3. If the second register is the PC, this is an 05273 // alternate form of ADR, which uses encoding T4, so check for that too. 05274 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC && 05275 static_cast<ARMOperand &>(*Operands[5]).isT2SOImm()) 05276 return false; 05277 05278 // Otherwise, we use encoding T4, which does not have a cc_out 05279 // operand. 05280 return true; 05281 } 05282 05283 // The thumb2 multiply instruction doesn't have a CCOut register, so 05284 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 05285 // use the 16-bit encoding or not. 
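// Illustrative examples (assumed, not from the original source) for the check
// below: inside an IT block, "mul r0, r1, r0" can use the 16-bit encoding
// (all low registers, destination matches a source), so its cc_out is kept;
// "mul r0, r1, r2" needs the 32-bit encoding, so the cc_out is removed.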
05286 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 05287 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && 05288 static_cast<ARMOperand &>(*Operands[3]).isReg() && 05289 static_cast<ARMOperand &>(*Operands[4]).isReg() && 05290 static_cast<ARMOperand &>(*Operands[5]).isReg() && 05291 // If the registers aren't low regs, the destination reg isn't the 05292 // same as one of the source regs, or the cc_out operand is zero 05293 // outside of an IT block, we have to use the 32-bit encoding, so 05294 // remove the cc_out operand. 05295 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) || 05296 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) || 05297 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) || 05298 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() != 05299 static_cast<ARMOperand &>(*Operands[5]).getReg() && 05300 static_cast<ARMOperand &>(*Operands[3]).getReg() != 05301 static_cast<ARMOperand &>(*Operands[4]).getReg()))) 05302 return true; 05303 05304 // Also check the 'mul' syntax variant that doesn't specify an explicit 05305 // destination register. 05306 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 05307 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && 05308 static_cast<ARMOperand &>(*Operands[3]).isReg() && 05309 static_cast<ARMOperand &>(*Operands[4]).isReg() && 05310 // If the registers aren't low regs or the cc_out operand is zero 05311 // outside of an IT block, we have to use the 32-bit encoding, so 05312 // remove the cc_out operand. 05313 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) || 05314 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) || 05315 !inITBlock())) 05316 return true; 05317 05318 05319 05320 // Register-register 'add/sub' for thumb does not have a cc_out operand 05321 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 05322 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 05323 // right, this will result in better diagnostics (which operand is off) 05324 // anyway. 
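// Illustrative examples (assumed, not from the original source) of the forms
// covered by the check below:
//   add sp, #16       @ ADD SP, SP, #imm shorthand, no cc_out
//   add sp, sp, #16   @ explicit three-operand form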
05325 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 05326 (Operands.size() == 5 || Operands.size() == 6) && 05327 static_cast<ARMOperand &>(*Operands[3]).isReg() && 05328 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP && 05329 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && 05330 (static_cast<ARMOperand &>(*Operands[4]).isImm() || 05331 (Operands.size() == 6 && 05332 static_cast<ARMOperand &>(*Operands[5]).isImm()))) 05333 return true; 05334 05335 return false; 05336 } 05337 05338 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic, 05339 OperandVector &Operands) { 05340 // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON 05341 unsigned RegIdx = 3; 05342 if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") && 05343 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32") { 05344 if (static_cast<ARMOperand &>(*Operands[3]).isToken() && 05345 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32") 05346 RegIdx = 4; 05347 05348 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() && 05349 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains( 05350 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) || 05351 ARMMCRegisterClasses[ARM::QPRRegClassID].contains( 05352 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()))) 05353 return true; 05354 } 05355 return false; 05356 } 05357 05358 static bool isDataTypeToken(StringRef Tok) { 05359 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 05360 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 05361 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 05362 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 05363 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 05364 Tok == ".f" || Tok == ".d"; 05365 } 05366 05367 // FIXME: This bit should probably be handled via an explicit match class 05368 // in the .td files that matches the suffix instead of having it be 05369 // a literal string token the way it is now. 05370 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { 05371 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm"); 05372 } 05373 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features, 05374 unsigned VariantID); 05375 05376 static bool RequiresVFPRegListValidation(StringRef Inst, 05377 bool &AcceptSinglePrecisionOnly, 05378 bool &AcceptDoublePrecisionOnly) { 05379 if (Inst.size() < 7) 05380 return false; 05381 05382 if (Inst.startswith("fldm") || Inst.startswith("fstm")) { 05383 StringRef AddressingMode = Inst.substr(4, 2); 05384 if (AddressingMode == "ia" || AddressingMode == "db" || 05385 AddressingMode == "ea" || AddressingMode == "fd") { 05386 AcceptSinglePrecisionOnly = Inst[6] == 's'; 05387 AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x'; 05388 return true; 05389 } 05390 } 05391 05392 return false; 05393 } 05394 05395 /// Parse an arm instruction mnemonic followed by its operands. 05396 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, 05397 SMLoc NameLoc, OperandVector &Operands) { 05398 // FIXME: Can this be done via tablegen in some fashion? 
05399 bool RequireVFPRegisterListCheck;
05400 bool AcceptSinglePrecisionOnly;
05401 bool AcceptDoublePrecisionOnly;
05402 RequireVFPRegisterListCheck =
05403 RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
05404 AcceptDoublePrecisionOnly);
05405 
05406 // Apply mnemonic aliases before doing anything else, as the destination
05407 // mnemonic may include suffixes and we want to handle them normally.
05408 // The generic tblgen'erated code does this later, at the start of
05409 // MatchInstructionImpl(), but that's too late for aliases that include
05410 // any sort of suffix.
05411 uint64_t AvailableFeatures = getAvailableFeatures();
05412 unsigned AssemblerDialect = getParser().getAssemblerDialect();
05413 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
05414 
05415 // First check for the ARM-specific .req directive.
05416 if (Parser.getTok().is(AsmToken::Identifier) &&
05417 Parser.getTok().getIdentifier() == ".req") {
05418 parseDirectiveReq(Name, NameLoc);
05419 // We always return 'error' for this, as we're done with this
05420 // statement and don't need to match the instruction.
05421 return true;
05422 }
05423 
05424 // Create the leading tokens for the mnemonic, split by '.' characters.
05425 size_t Start = 0, Next = Name.find('.');
05426 StringRef Mnemonic = Name.slice(Start, Next);
05427 
05428 // Split out the predication code and carry setting flag from the mnemonic.
05429 unsigned PredicationCode;
05430 unsigned ProcessorIMod;
05431 bool CarrySetting;
05432 StringRef ITMask;
05433 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
05434 ProcessorIMod, ITMask);
05435 
05436 // In Thumb1, only the branch (B) instruction can be predicated.
05437 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
05438 Parser.eatToEndOfStatement();
05439 return Error(NameLoc, "conditional execution not supported in Thumb1");
05440 }
05441 
05442 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
05443 
05444 // Handle the IT instruction ITMask. Convert it to a bitmask. This
05445 // is the mask as it will be for the IT encoding if the conditional
05446 // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
05447 // where the conditional bit0 is zero, the instruction post-processing
05448 // will adjust the mask accordingly.
05449 if (Mnemonic == "it") {
05450 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
05451 if (ITMask.size() > 3) {
05452 Parser.eatToEndOfStatement();
05453 return Error(Loc, "too many conditions on IT instruction");
05454 }
05455 unsigned Mask = 8;
05456 for (unsigned i = ITMask.size(); i != 0; --i) {
05457 char pos = ITMask[i - 1];
05458 if (pos != 't' && pos != 'e') {
05459 Parser.eatToEndOfStatement();
05460 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
05461 }
05462 Mask >>= 1;
05463 if (ITMask[i - 1] == 't')
05464 Mask |= 8;
05465 }
05466 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
05467 }
05468 
05469 // FIXME: This is all a pretty gross hack. We should automatically handle
05470 // optional operands like this via tblgen.
05471 
05472 // Next, add the CCOut and ConditionCode operands, if needed.
05473 //
05474 // For mnemonics which can ever incorporate a carry setting bit or predication
05475 // code, our matching model involves us always generating CCOut and
05476 // ConditionCode operands to match the mnemonic "as written" and then we let
05477 // the matcher deal with finding the right instruction or generating an
05478 // appropriate error.
05479 bool CanAcceptCarrySet, CanAcceptPredicationCode;
05480 getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
05481 
05482 // If we had a carry-set on an instruction that can't do that, issue an
05483 // error.
05484 if (!CanAcceptCarrySet && CarrySetting) {
05485 Parser.eatToEndOfStatement();
05486 return Error(NameLoc, "instruction '" + Mnemonic +
05487 "' can not set flags, but 's' suffix specified");
05488 }
05489 // If we had a predication code on an instruction that can't do that, issue an
05490 // error.
05491 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
05492 Parser.eatToEndOfStatement();
05493 return Error(NameLoc, "instruction '" + Mnemonic +
05494 "' is not predicable, but condition code specified");
05495 }
05496 
05497 // Add the carry setting operand, if necessary.
05498 if (CanAcceptCarrySet) {
05499 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
05500 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
05501 Loc));
05502 }
05503 
05504 // Add the predication code operand, if necessary.
05505 if (CanAcceptPredicationCode) {
05506 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
05507 CarrySetting);
05508 Operands.push_back(ARMOperand::CreateCondCode(
05509 ARMCC::CondCodes(PredicationCode), Loc));
05510 }
05511 
05512 // Add the processor imod operand, if necessary.
05513 if (ProcessorIMod) {
05514 Operands.push_back(ARMOperand::CreateImm(
05515 MCConstantExpr::Create(ProcessorIMod, getContext()),
05516 NameLoc, NameLoc));
05517 }
05518 
05519 // Add the remaining tokens in the mnemonic.
05520 while (Next != StringRef::npos) {
05521 Start = Next;
05522 Next = Name.find('.', Start + 1);
05523 StringRef ExtraToken = Name.slice(Start, Next);
05524 
05525 // Some NEON instructions have an optional datatype suffix that is
05526 // completely ignored. Check for that.
05527 if (isDataTypeToken(ExtraToken) &&
05528 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
05529 continue;
05530 
05531 // In ARM mode, generate an error if the .n qualifier is used.
05532 if (ExtraToken == ".n" && !isThumb()) {
05533 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
05534 Parser.eatToEndOfStatement();
05535 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
05536 "arm mode");
05537 }
05538 
05539 // The .n qualifier is always discarded as that is what the tables
05540 // and matcher expect. In ARM mode the .w qualifier has no effect,
05541 // so discard it to avoid errors that can be caused by the matcher.
05542 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
05543 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
05544 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
05545 }
05546 }
05547 
05548 // Read the remaining operands.
05549 if (getLexer().isNot(AsmToken::EndOfStatement)) {
05550 // Read the first operand.
05551 if (parseOperand(Operands, Mnemonic)) {
05552 Parser.eatToEndOfStatement();
05553 return true;
05554 }
05555 
05556 while (getLexer().is(AsmToken::Comma)) {
05557 Parser.Lex(); // Eat the comma.
05558 
05559 // Parse and remember the operand.
05560 if (parseOperand(Operands, Mnemonic)) {
05561 Parser.eatToEndOfStatement();
05562 return true;
05563 }
05564 }
05565 }
05566 
05567 if (getLexer().isNot(AsmToken::EndOfStatement)) {
05568 SMLoc Loc = getLexer().getLoc();
05569 Parser.eatToEndOfStatement();
05570 return Error(Loc, "unexpected token in argument list");
05571 }
05572 
05573 Parser.Lex(); // Consume the EndOfStatement
05574 
05575 if (RequireVFPRegisterListCheck) {
05576 ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
05577 if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
05578 return Error(Op.getStartLoc(),
05579 "VFP/Neon single precision register expected");
05580 if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
05581 return Error(Op.getStartLoc(),
05582 "VFP/Neon double precision register expected");
05583 }
05584 
05585 // Some instructions, mostly Thumb, have forms for the same mnemonic that
05586 // do and don't have a cc_out optional-def operand. With some spot-checks
05587 // of the operand list, we can figure out which variant we're trying to
05588 // parse and adjust accordingly before actually matching. We shouldn't ever
05589 // try to remove a cc_out operand that was explicitly set on the
05590 // mnemonic, of course (CarrySetting == true). Reason #317 why the
05591 // table driven matcher doesn't fit well with the ARM instruction set.
05592 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
05593 Operands.erase(Operands.begin() + 1);
05594 
05595 // Some instructions have the same mnemonic, but don't always
05596 // have a predicate. Distinguish them here and delete the
05597 // predicate if needed.
05598 if (shouldOmitPredicateOperand(Mnemonic, Operands))
05599 Operands.erase(Operands.begin() + 1);
05600 
05601 // ARM mode 'blx' needs special handling, as the register operand version
05602 // is predicable, but the label operand version is not. So, we can't rely
05603 // on the Mnemonic based checking to correctly figure out when to put
05604 // a k_CondCode operand in the list. If we're trying to match the label
05605 // version, remove the k_CondCode operand here.
05606 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
05607 static_cast<ARMOperand &>(*Operands[2]).isImm())
05608 Operands.erase(Operands.begin() + 1);
05609 
05610 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
05611 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
05612 // a single GPRPair reg operand is used in the .td file to replace the two
05613 // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
05614 // expressed as a GPRPair, so we have to manually merge them.
05615 // FIXME: We would really like to be able to tablegen'erate this.
05616 if (!isThumb() && Operands.size() > 4 &&
05617 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
05618 Mnemonic == "stlexd")) {
05619 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
05620 unsigned Idx = isLoad ? 2 : 3;
05621 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
05622 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
05623 
05624 const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
05625 // Adjust only if Op1 and Op2 are GPRs.
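// Illustrative example (assumed, not from the original source): for
// "ldrexd r0, r1, [r2]" the two GPR operands are folded below into the single
// GPRPair register (r0_r1) that the .td pattern expects.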
05626 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
05627 MRC.contains(Op2.getReg())) {
05628 unsigned Reg1 = Op1.getReg();
05629 unsigned Reg2 = Op2.getReg();
05630 unsigned Rt = MRI->getEncodingValue(Reg1);
05631 unsigned Rt2 = MRI->getEncodingValue(Reg2);
05632 
05633 // Rt2 must be Rt + 1 and Rt must be even.
05634 if (Rt + 1 != Rt2 || (Rt & 1)) {
05635 Error(Op2.getStartLoc(), isLoad
05636 ? "destination operands must be sequential"
05637 : "source operands must be sequential");
05638 return true;
05639 }
05640 unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
05641 &(MRI->getRegClass(ARM::GPRPairRegClassID)));
05642 Operands[Idx] =
05643 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
05644 Operands.erase(Operands.begin() + Idx + 1);
05645 }
05646 }
05647 
05648 // GNU Assembler extension (compatibility)
05649 if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
05650 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
05651 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
05652 if (Op3.isMem()) {
05653 assert(Op2.isReg() && "expected register argument");
05654 
05655 unsigned SuperReg = MRI->getMatchingSuperReg(
05656 Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
05657 
05658 assert(SuperReg && "expected register pair");
05659 
05660 unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
05661 
05662 Operands.insert(
05663 Operands.begin() + 3,
05664 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
05665 }
05666 }
05667 
05668 // FIXME: As said above, this is all a pretty gross hack. This instruction
05669 // does not fit with other "subs" and tblgen.
05670 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
05671 // so the Mnemonic is the original name "subs" and delete the predicate
05672 // operand so it will match the table entry.
05673 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
05674 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
05675 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
05676 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
05677 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
05678 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
05679 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
05680 Operands.erase(Operands.begin() + 1);
05681 }
05682 return false;
05683 }
05684 
05685 // Validate context-sensitive operand constraints.
05686 
05687 // Return 'true' if the register list contains non-low GPR registers,
05688 // 'false' otherwise. If Reg is in the register list or is HiReg, set
05689 // 'containsReg' to true.
05690 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
05691 unsigned HiReg, bool &containsReg) {
05692 containsReg = false;
05693 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
05694 unsigned OpReg = Inst.getOperand(i).getReg();
05695 if (OpReg == Reg)
05696 containsReg = true;
05697 // Anything other than a low register isn't legal here.
05698 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
05699 return true;
05700 }
05701 return false;
05702 }
05703 
05704 // Check if the specified register is in the register list of the inst,
05705 // starting at the indicated operand number.
05706 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 05707 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 05708 unsigned OpReg = Inst.getOperand(i).getReg(); 05709 if (OpReg == Reg) 05710 return true; 05711 } 05712 return false; 05713 } 05714 05715 // Return true if instruction has the interesting property of being 05716 // allowed in IT blocks, but not being predicable. 05717 static bool instIsBreakpoint(const MCInst &Inst) { 05718 return Inst.getOpcode() == ARM::tBKPT || 05719 Inst.getOpcode() == ARM::BKPT || 05720 Inst.getOpcode() == ARM::tHLT || 05721 Inst.getOpcode() == ARM::HLT; 05722 05723 } 05724 05725 // FIXME: We would really like to be able to tablegen'erate this. 05726 bool ARMAsmParser::validateInstruction(MCInst &Inst, 05727 const OperandVector &Operands) { 05728 const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); 05729 SMLoc Loc = Operands[0]->getStartLoc(); 05730 05731 // Check the IT block state first. 05732 // NOTE: BKPT and HLT instructions have the interesting property of being 05733 // allowed in IT blocks, but not being predicable. They just always execute. 05734 if (inITBlock() && !instIsBreakpoint(Inst)) { 05735 unsigned Bit = 1; 05736 if (ITState.FirstCond) 05737 ITState.FirstCond = false; 05738 else 05739 Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 05740 // The instruction must be predicable. 05741 if (!MCID.isPredicable()) 05742 return Error(Loc, "instructions in IT block must be predicable"); 05743 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 05744 unsigned ITCond = Bit ? ITState.Cond : 05745 ARMCC::getOppositeCondition(ITState.Cond); 05746 if (Cond != ITCond) { 05747 // Find the condition code Operand to get its SMLoc information. 05748 SMLoc CondLoc; 05749 for (unsigned I = 1; I < Operands.size(); ++I) 05750 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) 05751 CondLoc = Operands[I]->getStartLoc(); 05752 return Error(CondLoc, "incorrect condition in IT block; got '" + 05753 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 05754 "', but expected '" + 05755 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 05756 } 05757 // Check for non-'al' condition codes outside of the IT block. 05758 } else if (isThumbTwo() && MCID.isPredicable() && 05759 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 05760 ARMCC::AL && Inst.getOpcode() != ARM::tBcc && 05761 Inst.getOpcode() != ARM::t2Bcc) 05762 return Error(Loc, "predicated instructions must be in IT block"); 05763 05764 const unsigned Opcode = Inst.getOpcode(); 05765 switch (Opcode) { 05766 case ARM::LDRD: 05767 case ARM::LDRD_PRE: 05768 case ARM::LDRD_POST: { 05769 const unsigned RtReg = Inst.getOperand(0).getReg(); 05770 05771 // Rt can't be R14. 05772 if (RtReg == ARM::LR) 05773 return Error(Operands[3]->getStartLoc(), 05774 "Rt can't be R14"); 05775 05776 const unsigned Rt = MRI->getEncodingValue(RtReg); 05777 // Rt must be even-numbered. 05778 if ((Rt & 1) == 1) 05779 return Error(Operands[3]->getStartLoc(), 05780 "Rt must be even-numbered"); 05781 05782 // Rt2 must be Rt + 1. 
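// Illustrative examples (assumed, not from the original source) for the LDRD
// checks in this case:
//   ldrd r0, r1, [r2]   @ accepted: Rt is even and Rt2 == Rt + 1
//   ldrd r1, r2, [r3]   @ rejected: Rt must be even-numbered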
05783 const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 05784 if (Rt2 != Rt + 1) 05785 return Error(Operands[3]->getStartLoc(), 05786 "destination operands must be sequential"); 05787 05788 if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) { 05789 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg()); 05790 // For addressing modes with writeback, the base register needs to be 05791 // different from the destination registers. 05792 if (Rn == Rt || Rn == Rt2) 05793 return Error(Operands[3]->getStartLoc(), 05794 "base register needs to be different from destination " 05795 "registers"); 05796 } 05797 05798 return false; 05799 } 05800 case ARM::t2LDRDi8: 05801 case ARM::t2LDRD_PRE: 05802 case ARM::t2LDRD_POST: { 05803 // Rt2 must be different from Rt. 05804 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); 05805 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 05806 if (Rt2 == Rt) 05807 return Error(Operands[3]->getStartLoc(), 05808 "destination operands can't be identical"); 05809 return false; 05810 } 05811 case ARM::STRD: { 05812 // Rt2 must be Rt + 1. 05813 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); 05814 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 05815 if (Rt2 != Rt + 1) 05816 return Error(Operands[3]->getStartLoc(), 05817 "source operands must be sequential"); 05818 return false; 05819 } 05820 case ARM::STRD_PRE: 05821 case ARM::STRD_POST: { 05822 // Rt2 must be Rt + 1. 05823 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 05824 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg()); 05825 if (Rt2 != Rt + 1) 05826 return Error(Operands[3]->getStartLoc(), 05827 "source operands must be sequential"); 05828 return false; 05829 } 05830 case ARM::STR_PRE_IMM: 05831 case ARM::STR_PRE_REG: 05832 case ARM::STR_POST_IMM: 05833 case ARM::STR_POST_REG: 05834 case ARM::STRH_PRE: 05835 case ARM::STRH_POST: 05836 case ARM::STRB_PRE_IMM: 05837 case ARM::STRB_PRE_REG: 05838 case ARM::STRB_POST_IMM: 05839 case ARM::STRB_POST_REG: { 05840 // Rt must be different from Rn. 05841 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 05842 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); 05843 05844 if (Rt == Rn) 05845 return Error(Operands[3]->getStartLoc(), 05846 "source register and base register can't be identical"); 05847 return false; 05848 } 05849 case ARM::LDR_PRE_IMM: 05850 case ARM::LDR_PRE_REG: 05851 case ARM::LDR_POST_IMM: 05852 case ARM::LDR_POST_REG: 05853 case ARM::LDRH_PRE: 05854 case ARM::LDRH_POST: 05855 case ARM::LDRSH_PRE: 05856 case ARM::LDRSH_POST: 05857 case ARM::LDRB_PRE_IMM: 05858 case ARM::LDRB_PRE_REG: 05859 case ARM::LDRB_POST_IMM: 05860 case ARM::LDRB_POST_REG: 05861 case ARM::LDRSB_PRE: 05862 case ARM::LDRSB_POST: { 05863 // Rt must be different from Rn. 05864 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); 05865 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); 05866 05867 if (Rt == Rn) 05868 return Error(Operands[3]->getStartLoc(), 05869 "destination register and base register can't be identical"); 05870 return false; 05871 } 05872 case ARM::SBFX: 05873 case ARM::UBFX: { 05874 // Width must be in range [1, 32-lsb]. 
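// Illustrative examples (assumed, not from the original source) for the width
// range check below:
//   sbfx r0, r1, #8, #24   @ accepted: 24 <= 32 - 8
//   sbfx r0, r1, #8, #25   @ rejected: width exceeds 32 - lsb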
05875 unsigned LSB = Inst.getOperand(2).getImm(); 05876 unsigned Widthm1 = Inst.getOperand(3).getImm(); 05877 if (Widthm1 >= 32 - LSB) 05878 return Error(Operands[5]->getStartLoc(), 05879 "bitfield width must be in range [1,32-lsb]"); 05880 return false; 05881 } 05882 // Notionally handles ARM::tLDMIA_UPD too. 05883 case ARM::tLDMIA: { 05884 // If we're parsing Thumb2, the .w variant is available and handles 05885 // most cases that are normally illegal for a Thumb1 LDM instruction. 05886 // We'll make the transformation in processInstruction() if necessary. 05887 // 05888 // Thumb LDM instructions are writeback iff the base register is not 05889 // in the register list. 05890 unsigned Rn = Inst.getOperand(0).getReg(); 05891 bool HasWritebackToken = 05892 (static_cast<ARMOperand &>(*Operands[3]).isToken() && 05893 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); 05894 bool ListContainsBase; 05895 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo()) 05896 return Error(Operands[3 + HasWritebackToken]->getStartLoc(), 05897 "registers must be in range r0-r7"); 05898 // If we should have writeback, then there should be a '!' token. 05899 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo()) 05900 return Error(Operands[2]->getStartLoc(), 05901 "writeback operator '!' expected"); 05902 // If we should not have writeback, there must not be a '!'. This is 05903 // true even for the 32-bit wide encodings. 05904 if (ListContainsBase && HasWritebackToken) 05905 return Error(Operands[3]->getStartLoc(), 05906 "writeback operator '!' not allowed when base register " 05907 "in register list"); 05908 05909 break; 05910 } 05911 case ARM::LDMIA_UPD: 05912 case ARM::LDMDB_UPD: 05913 case ARM::LDMIB_UPD: 05914 case ARM::LDMDA_UPD: 05915 // ARM variants loading and updating the same register are only officially 05916 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before. 05917 if (!hasV7Ops()) 05918 break; 05919 // Fallthrough 05920 case ARM::t2LDMIA_UPD: 05921 case ARM::t2LDMDB_UPD: 05922 case ARM::t2STMIA_UPD: 05923 case ARM::t2STMDB_UPD: { 05924 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 05925 return Error(Operands.back()->getStartLoc(), 05926 "writeback register not allowed in register list"); 05927 break; 05928 } 05929 case ARM::sysLDMIA_UPD: 05930 case ARM::sysLDMDA_UPD: 05931 case ARM::sysLDMDB_UPD: 05932 case ARM::sysLDMIB_UPD: 05933 if (!listContainsReg(Inst, 3, ARM::PC)) 05934 return Error(Operands[4]->getStartLoc(), 05935 "writeback register only allowed on system LDM " 05936 "if PC in register-list"); 05937 break; 05938 case ARM::sysSTMIA_UPD: 05939 case ARM::sysSTMDA_UPD: 05940 case ARM::sysSTMDB_UPD: 05941 case ARM::sysSTMIB_UPD: 05942 return Error(Operands[2]->getStartLoc(), 05943 "system STM cannot have writeback register"); 05944 case ARM::tMUL: { 05945 // The second source operand must be the same register as the destination 05946 // operand. 05947 // 05948 // In this case, we must directly check the parsed operands because the 05949 // cvtThumbMultiply() function is written in such a way that it guarantees 05950 // this first statement is always true for the new Inst. Essentially, the 05951 // destination is unconditionally copied into the second source operand 05952 // without checking to see if it matches what we actually parsed. 
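// Illustrative examples (assumed, not from the original source) for the check
// below: "muls r0, r1, r0" is accepted, while "muls r0, r1, r2" is rejected
// because the destination must match one of the source registers.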
05953 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() != 05954 ((ARMOperand &)*Operands[5]).getReg()) && 05955 (((ARMOperand &)*Operands[3]).getReg() != 05956 ((ARMOperand &)*Operands[4]).getReg())) { 05957 return Error(Operands[3]->getStartLoc(), 05958 "destination register must match source register"); 05959 } 05960 break; 05961 } 05962 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 05963 // so only issue a diagnostic for thumb1. The instructions will be 05964 // switched to the t2 encodings in processInstruction() if necessary. 05965 case ARM::tPOP: { 05966 bool ListContainsBase; 05967 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) && 05968 !isThumbTwo()) 05969 return Error(Operands[2]->getStartLoc(), 05970 "registers must be in range r0-r7 or pc"); 05971 break; 05972 } 05973 case ARM::tPUSH: { 05974 bool ListContainsBase; 05975 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) && 05976 !isThumbTwo()) 05977 return Error(Operands[2]->getStartLoc(), 05978 "registers must be in range r0-r7 or lr"); 05979 break; 05980 } 05981 case ARM::tSTMIA_UPD: { 05982 bool ListContainsBase, InvalidLowList; 05983 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(), 05984 0, ListContainsBase); 05985 if (InvalidLowList && !isThumbTwo()) 05986 return Error(Operands[4]->getStartLoc(), 05987 "registers must be in range r0-r7"); 05988 05989 // This would be converted to a 32-bit stm, but that's not valid if the 05990 // writeback register is in the list. 05991 if (InvalidLowList && ListContainsBase) 05992 return Error(Operands[4]->getStartLoc(), 05993 "writeback operator '!' not allowed when base register " 05994 "in register list"); 05995 break; 05996 } 05997 case ARM::tADDrSP: { 05998 // If the non-SP source operand and the destination operand are not the 05999 // same, we need thumb2 (for the wide encoding), or we have an error. 06000 if (!isThumbTwo() && 06001 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 06002 return Error(Operands[4]->getStartLoc(), 06003 "source register must be the same as destination"); 06004 } 06005 break; 06006 } 06007 // Final range checking for Thumb unconditional branch instructions. 06008 case ARM::tB: 06009 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>()) 06010 return Error(Operands[2]->getStartLoc(), "branch target out of range"); 06011 break; 06012 case ARM::t2B: { 06013 int op = (Operands[2]->isImm()) ? 2 : 3; 06014 if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>()) 06015 return Error(Operands[op]->getStartLoc(), "branch target out of range"); 06016 break; 06017 } 06018 // Final range checking for Thumb conditional branch instructions. 06019 case ARM::tBcc: 06020 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>()) 06021 return Error(Operands[2]->getStartLoc(), "branch target out of range"); 06022 break; 06023 case ARM::t2Bcc: { 06024 int Op = (Operands[2]->isImm()) ? 2 : 3; 06025 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>()) 06026 return Error(Operands[Op]->getStartLoc(), "branch target out of range"); 06027 break; 06028 } 06029 case ARM::MOVi16: 06030 case ARM::t2MOVi16: 06031 case ARM::t2MOVTi16: 06032 { 06033 // We want to avoid misleadingly allowing something like "mov r0, <symbol>" 06034 // especially when we turn it into a movw and the expression <symbol> does 06035 // not have a :lower16: or :upper16 as part of the expression. 
We don't 06036 // want the behavior of silently truncating, which can be unexpected and 06037 // lead to bugs that are difficult to find since this is an easy mistake 06038 // to make. 06039 int i = (Operands[3]->isImm()) ? 3 : 4; 06040 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]); 06041 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); 06042 if (CE) break; 06043 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm()); 06044 if (!E) break; 06045 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E); 06046 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 && 06047 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16)) 06048 return Error( 06049 Op.getStartLoc(), 06050 "immediate expression for mov requires :lower16: or :upper16"); 06051 break; 06052 } 06053 } 06054 06055 return false; 06056 } 06057 06058 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) { 06059 switch(Opc) { 06060 default: llvm_unreachable("unexpected opcode!"); 06061 // VST1LN 06062 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; 06063 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; 06064 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; 06065 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; 06066 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; 06067 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; 06068 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8; 06069 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16; 06070 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32; 06071 06072 // VST2LN 06073 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 06074 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 06075 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 06076 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 06077 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 06078 06079 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 06080 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 06081 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 06082 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 06083 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 06084 06085 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8; 06086 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16; 06087 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32; 06088 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16; 06089 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32; 06090 06091 // VST3LN 06092 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 06093 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 06094 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 06095 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD; 06096 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 06097 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 06098 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 06099 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 06100 case 
ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD; 06101 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 06102 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8; 06103 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16; 06104 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32; 06105 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16; 06106 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32; 06107 06108 // VST3 06109 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 06110 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 06111 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 06112 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 06113 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 06114 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 06115 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 06116 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 06117 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 06118 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 06119 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 06120 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 06121 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8; 06122 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16; 06123 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32; 06124 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8; 06125 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16; 06126 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32; 06127 06128 // VST4LN 06129 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 06130 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 06131 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 06132 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD; 06133 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 06134 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 06135 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 06136 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 06137 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD; 06138 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 06139 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8; 06140 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16; 06141 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32; 06142 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16; 06143 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32; 06144 06145 // VST4 06146 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 06147 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 06148 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 06149 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 06150 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 06151 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 06152 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 06153 case 
ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 06154 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 06155 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 06156 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 06157 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 06158 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8; 06159 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16; 06160 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32; 06161 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8; 06162 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16; 06163 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32; 06164 } 06165 } 06166 06167 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) { 06168 switch(Opc) { 06169 default: llvm_unreachable("unexpected opcode!"); 06170 // VLD1LN 06171 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 06172 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 06173 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 06174 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 06175 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 06176 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 06177 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8; 06178 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16; 06179 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32; 06180 06181 // VLD2LN 06182 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 06183 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 06184 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 06185 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD; 06186 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 06187 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 06188 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 06189 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 06190 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD; 06191 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 06192 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8; 06193 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16; 06194 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32; 06195 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16; 06196 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32; 06197 06198 // VLD3DUP 06199 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 06200 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 06201 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 06202 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD; 06203 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; 06204 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 06205 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 06206 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 06207 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return 
ARM::VLD3DUPd32_UPD; 06208 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD; 06209 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; 06210 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 06211 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8; 06212 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16; 06213 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32; 06214 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8; 06215 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16; 06216 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32; 06217 06218 // VLD3LN 06219 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 06220 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 06221 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 06222 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD; 06223 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 06224 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 06225 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 06226 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 06227 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD; 06228 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 06229 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8; 06230 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16; 06231 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32; 06232 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16; 06233 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32; 06234 06235 // VLD3 06236 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 06237 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 06238 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 06239 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; 06240 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 06241 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 06242 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 06243 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 06244 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 06245 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; 06246 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 06247 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 06248 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8; 06249 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16; 06250 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32; 06251 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8; 06252 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16; 06253 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32; 06254 06255 // VLD4LN 06256 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 06257 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 06258 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 06259 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; 06260 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing 
= 2; return ARM::VLD4LNq32_UPD; 06261 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 06262 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 06263 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 06264 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; 06265 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 06266 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8; 06267 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16; 06268 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32; 06269 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16; 06270 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32; 06271 06272 // VLD4DUP 06273 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 06274 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 06275 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 06276 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD; 06277 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD; 06278 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 06279 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 06280 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 06281 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 06282 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD; 06283 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD; 06284 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 06285 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8; 06286 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16; 06287 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32; 06288 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8; 06289 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16; 06290 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32; 06291 06292 // VLD4 06293 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 06294 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 06295 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 06296 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 06297 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 06298 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 06299 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 06300 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 06301 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 06302 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 06303 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 06304 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 06305 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8; 06306 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16; 06307 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32; 06308 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8; 06309 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16; 06310 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32; 06311 } 06312 } 06313 06314 bool 
ARMAsmParser::processInstruction(MCInst &Inst, 06315 const OperandVector &Operands) { 06316 switch (Inst.getOpcode()) { 06317 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction. 06318 case ARM::LDRT_POST: 06319 case ARM::LDRBT_POST: { 06320 const unsigned Opcode = 06321 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM 06322 : ARM::LDRBT_POST_IMM; 06323 MCInst TmpInst; 06324 TmpInst.setOpcode(Opcode); 06325 TmpInst.addOperand(Inst.getOperand(0)); 06326 TmpInst.addOperand(Inst.getOperand(1)); 06327 TmpInst.addOperand(Inst.getOperand(1)); 06328 TmpInst.addOperand(MCOperand::CreateReg(0)); 06329 TmpInst.addOperand(MCOperand::CreateImm(0)); 06330 TmpInst.addOperand(Inst.getOperand(2)); 06331 TmpInst.addOperand(Inst.getOperand(3)); 06332 Inst = TmpInst; 06333 return true; 06334 } 06335 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction. 06336 case ARM::STRT_POST: 06337 case ARM::STRBT_POST: { 06338 const unsigned Opcode = 06339 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM 06340 : ARM::STRBT_POST_IMM; 06341 MCInst TmpInst; 06342 TmpInst.setOpcode(Opcode); 06343 TmpInst.addOperand(Inst.getOperand(1)); 06344 TmpInst.addOperand(Inst.getOperand(0)); 06345 TmpInst.addOperand(Inst.getOperand(1)); 06346 TmpInst.addOperand(MCOperand::CreateReg(0)); 06347 TmpInst.addOperand(MCOperand::CreateImm(0)); 06348 TmpInst.addOperand(Inst.getOperand(2)); 06349 TmpInst.addOperand(Inst.getOperand(3)); 06350 Inst = TmpInst; 06351 return true; 06352 } 06353 // Alias for alternate form of 'ADR Rd, #imm' instruction. 06354 case ARM::ADDri: { 06355 if (Inst.getOperand(1).getReg() != ARM::PC || 06356 Inst.getOperand(5).getReg() != 0) 06357 return false; 06358 MCInst TmpInst; 06359 TmpInst.setOpcode(ARM::ADR); 06360 TmpInst.addOperand(Inst.getOperand(0)); 06361 TmpInst.addOperand(Inst.getOperand(2)); 06362 TmpInst.addOperand(Inst.getOperand(3)); 06363 TmpInst.addOperand(Inst.getOperand(4)); 06364 Inst = TmpInst; 06365 return true; 06366 } 06367 // Aliases for alternate PC+imm syntax of LDR instructions. 06368 case ARM::t2LDRpcrel: 06369 // Select the narrow version if the immediate will fit. 06370 if (Inst.getOperand(1).getImm() > 0 && 06371 Inst.getOperand(1).getImm() <= 0xff && 06372 !(static_cast<ARMOperand &>(*Operands[2]).isToken() && 06373 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w")) 06374 Inst.setOpcode(ARM::tLDRpci); 06375 else 06376 Inst.setOpcode(ARM::t2LDRpci); 06377 return true; 06378 case ARM::t2LDRBpcrel: 06379 Inst.setOpcode(ARM::t2LDRBpci); 06380 return true; 06381 case ARM::t2LDRHpcrel: 06382 Inst.setOpcode(ARM::t2LDRHpci); 06383 return true; 06384 case ARM::t2LDRSBpcrel: 06385 Inst.setOpcode(ARM::t2LDRSBpci); 06386 return true; 06387 case ARM::t2LDRSHpcrel: 06388 Inst.setOpcode(ARM::t2LDRSHpci); 06389 return true; 06390 // Handle NEON VST complex aliases. 06391 case ARM::VST1LNdWB_register_Asm_8: 06392 case ARM::VST1LNdWB_register_Asm_16: 06393 case ARM::VST1LNdWB_register_Asm_32: { 06394 MCInst TmpInst; 06395 // Shuffle the operands around so the lane index operand is in the 06396 // right place. 
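    // The *_Asm pseudo keeps the operands in source order:
    //   (Vd, lane, Rn, alignment, Rm, pred), e.g. 'vst1.8 {d4[2]}, [r1], r3',
    // whereas the real VST1LNd*_UPD instruction expects
    //   (Rn_wb, Rn, alignment, Rm, Vd, lane, pred),
    // which is the order the operands are re-added in below.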
06397 unsigned Spacing; 06398 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06399 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06400 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06401 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06402 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06403 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06404 TmpInst.addOperand(Inst.getOperand(1)); // lane 06405 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06406 TmpInst.addOperand(Inst.getOperand(6)); 06407 Inst = TmpInst; 06408 return true; 06409 } 06410 06411 case ARM::VST2LNdWB_register_Asm_8: 06412 case ARM::VST2LNdWB_register_Asm_16: 06413 case ARM::VST2LNdWB_register_Asm_32: 06414 case ARM::VST2LNqWB_register_Asm_16: 06415 case ARM::VST2LNqWB_register_Asm_32: { 06416 MCInst TmpInst; 06417 // Shuffle the operands around so the lane index operand is in the 06418 // right place. 06419 unsigned Spacing; 06420 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06421 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06422 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06423 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06424 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06425 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06426 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06427 Spacing)); 06428 TmpInst.addOperand(Inst.getOperand(1)); // lane 06429 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06430 TmpInst.addOperand(Inst.getOperand(6)); 06431 Inst = TmpInst; 06432 return true; 06433 } 06434 06435 case ARM::VST3LNdWB_register_Asm_8: 06436 case ARM::VST3LNdWB_register_Asm_16: 06437 case ARM::VST3LNdWB_register_Asm_32: 06438 case ARM::VST3LNqWB_register_Asm_16: 06439 case ARM::VST3LNqWB_register_Asm_32: { 06440 MCInst TmpInst; 06441 // Shuffle the operands around so the lane index operand is in the 06442 // right place. 06443 unsigned Spacing; 06444 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06445 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06446 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06447 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06448 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06449 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06450 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06451 Spacing)); 06452 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06453 Spacing * 2)); 06454 TmpInst.addOperand(Inst.getOperand(1)); // lane 06455 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06456 TmpInst.addOperand(Inst.getOperand(6)); 06457 Inst = TmpInst; 06458 return true; 06459 } 06460 06461 case ARM::VST4LNdWB_register_Asm_8: 06462 case ARM::VST4LNdWB_register_Asm_16: 06463 case ARM::VST4LNdWB_register_Asm_32: 06464 case ARM::VST4LNqWB_register_Asm_16: 06465 case ARM::VST4LNqWB_register_Asm_32: { 06466 MCInst TmpInst; 06467 // Shuffle the operands around so the lane index operand is in the 06468 // right place. 
06469 unsigned Spacing; 06470 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06471 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06472 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06473 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06474 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06475 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06476 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06477 Spacing)); 06478 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06479 Spacing * 2)); 06480 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06481 Spacing * 3)); 06482 TmpInst.addOperand(Inst.getOperand(1)); // lane 06483 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06484 TmpInst.addOperand(Inst.getOperand(6)); 06485 Inst = TmpInst; 06486 return true; 06487 } 06488 06489 case ARM::VST1LNdWB_fixed_Asm_8: 06490 case ARM::VST1LNdWB_fixed_Asm_16: 06491 case ARM::VST1LNdWB_fixed_Asm_32: { 06492 MCInst TmpInst; 06493 // Shuffle the operands around so the lane index operand is in the 06494 // right place. 06495 unsigned Spacing; 06496 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06497 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06498 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06499 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06500 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06501 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06502 TmpInst.addOperand(Inst.getOperand(1)); // lane 06503 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06504 TmpInst.addOperand(Inst.getOperand(5)); 06505 Inst = TmpInst; 06506 return true; 06507 } 06508 06509 case ARM::VST2LNdWB_fixed_Asm_8: 06510 case ARM::VST2LNdWB_fixed_Asm_16: 06511 case ARM::VST2LNdWB_fixed_Asm_32: 06512 case ARM::VST2LNqWB_fixed_Asm_16: 06513 case ARM::VST2LNqWB_fixed_Asm_32: { 06514 MCInst TmpInst; 06515 // Shuffle the operands around so the lane index operand is in the 06516 // right place. 06517 unsigned Spacing; 06518 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06519 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06520 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06521 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06522 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06523 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06524 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06525 Spacing)); 06526 TmpInst.addOperand(Inst.getOperand(1)); // lane 06527 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06528 TmpInst.addOperand(Inst.getOperand(5)); 06529 Inst = TmpInst; 06530 return true; 06531 } 06532 06533 case ARM::VST3LNdWB_fixed_Asm_8: 06534 case ARM::VST3LNdWB_fixed_Asm_16: 06535 case ARM::VST3LNdWB_fixed_Asm_32: 06536 case ARM::VST3LNqWB_fixed_Asm_16: 06537 case ARM::VST3LNqWB_fixed_Asm_32: { 06538 MCInst TmpInst; 06539 // Shuffle the operands around so the lane index operand is in the 06540 // right place. 
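    // In the '_fixed' writeback form (e.g. 'vst3.16 {d0[1], d1[1], d2[1]}, [r4]!')
    // there is no index register, so the Rm slot of the real instruction is
    // filled with a zero ("no register") operand below.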
06541 unsigned Spacing; 06542 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06543 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06544 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06545 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06546 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06547 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06548 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06549 Spacing)); 06550 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06551 Spacing * 2)); 06552 TmpInst.addOperand(Inst.getOperand(1)); // lane 06553 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06554 TmpInst.addOperand(Inst.getOperand(5)); 06555 Inst = TmpInst; 06556 return true; 06557 } 06558 06559 case ARM::VST4LNdWB_fixed_Asm_8: 06560 case ARM::VST4LNdWB_fixed_Asm_16: 06561 case ARM::VST4LNdWB_fixed_Asm_32: 06562 case ARM::VST4LNqWB_fixed_Asm_16: 06563 case ARM::VST4LNqWB_fixed_Asm_32: { 06564 MCInst TmpInst; 06565 // Shuffle the operands around so the lane index operand is in the 06566 // right place. 06567 unsigned Spacing; 06568 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06569 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06570 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06571 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06572 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06573 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06574 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06575 Spacing)); 06576 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06577 Spacing * 2)); 06578 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06579 Spacing * 3)); 06580 TmpInst.addOperand(Inst.getOperand(1)); // lane 06581 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06582 TmpInst.addOperand(Inst.getOperand(5)); 06583 Inst = TmpInst; 06584 return true; 06585 } 06586 06587 case ARM::VST1LNdAsm_8: 06588 case ARM::VST1LNdAsm_16: 06589 case ARM::VST1LNdAsm_32: { 06590 MCInst TmpInst; 06591 // Shuffle the operands around so the lane index operand is in the 06592 // right place. 06593 unsigned Spacing; 06594 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06595 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06596 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06597 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06598 TmpInst.addOperand(Inst.getOperand(1)); // lane 06599 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06600 TmpInst.addOperand(Inst.getOperand(5)); 06601 Inst = TmpInst; 06602 return true; 06603 } 06604 06605 case ARM::VST2LNdAsm_8: 06606 case ARM::VST2LNdAsm_16: 06607 case ARM::VST2LNdAsm_32: 06608 case ARM::VST2LNqAsm_16: 06609 case ARM::VST2LNqAsm_32: { 06610 MCInst TmpInst; 06611 // Shuffle the operands around so the lane index operand is in the 06612 // right place. 
06613 unsigned Spacing; 06614 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06615 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06616 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06617 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06618 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06619 Spacing)); 06620 TmpInst.addOperand(Inst.getOperand(1)); // lane 06621 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06622 TmpInst.addOperand(Inst.getOperand(5)); 06623 Inst = TmpInst; 06624 return true; 06625 } 06626 06627 case ARM::VST3LNdAsm_8: 06628 case ARM::VST3LNdAsm_16: 06629 case ARM::VST3LNdAsm_32: 06630 case ARM::VST3LNqAsm_16: 06631 case ARM::VST3LNqAsm_32: { 06632 MCInst TmpInst; 06633 // Shuffle the operands around so the lane index operand is in the 06634 // right place. 06635 unsigned Spacing; 06636 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06637 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06638 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06639 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06640 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06641 Spacing)); 06642 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06643 Spacing * 2)); 06644 TmpInst.addOperand(Inst.getOperand(1)); // lane 06645 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06646 TmpInst.addOperand(Inst.getOperand(5)); 06647 Inst = TmpInst; 06648 return true; 06649 } 06650 06651 case ARM::VST4LNdAsm_8: 06652 case ARM::VST4LNdAsm_16: 06653 case ARM::VST4LNdAsm_32: 06654 case ARM::VST4LNqAsm_16: 06655 case ARM::VST4LNqAsm_32: { 06656 MCInst TmpInst; 06657 // Shuffle the operands around so the lane index operand is in the 06658 // right place. 06659 unsigned Spacing; 06660 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 06661 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06662 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06663 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06664 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06665 Spacing)); 06666 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06667 Spacing * 2)); 06668 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06669 Spacing * 3)); 06670 TmpInst.addOperand(Inst.getOperand(1)); // lane 06671 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06672 TmpInst.addOperand(Inst.getOperand(5)); 06673 Inst = TmpInst; 06674 return true; 06675 } 06676 06677 // Handle NEON VLD complex aliases. 06678 case ARM::VLD1LNdWB_register_Asm_8: 06679 case ARM::VLD1LNdWB_register_Asm_16: 06680 case ARM::VLD1LNdWB_register_Asm_32: { 06681 MCInst TmpInst; 06682 // Shuffle the operands around so the lane index operand is in the 06683 // right place. 
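    // Same reshuffling as for the stores (e.g. 'vld1.8 {d4[2]}, [r1], r3'), but
    // a lane load also preserves the other lanes of Vd, so the destination
    // register is re-added as the tied input source operand below.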
06684 unsigned Spacing; 06685 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06686 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06687 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06688 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06689 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06690 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06691 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06692 TmpInst.addOperand(Inst.getOperand(1)); // lane 06693 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06694 TmpInst.addOperand(Inst.getOperand(6)); 06695 Inst = TmpInst; 06696 return true; 06697 } 06698 06699 case ARM::VLD2LNdWB_register_Asm_8: 06700 case ARM::VLD2LNdWB_register_Asm_16: 06701 case ARM::VLD2LNdWB_register_Asm_32: 06702 case ARM::VLD2LNqWB_register_Asm_16: 06703 case ARM::VLD2LNqWB_register_Asm_32: { 06704 MCInst TmpInst; 06705 // Shuffle the operands around so the lane index operand is in the 06706 // right place. 06707 unsigned Spacing; 06708 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06709 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06710 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06711 Spacing)); 06712 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06713 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06714 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06715 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06716 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06717 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06718 Spacing)); 06719 TmpInst.addOperand(Inst.getOperand(1)); // lane 06720 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06721 TmpInst.addOperand(Inst.getOperand(6)); 06722 Inst = TmpInst; 06723 return true; 06724 } 06725 06726 case ARM::VLD3LNdWB_register_Asm_8: 06727 case ARM::VLD3LNdWB_register_Asm_16: 06728 case ARM::VLD3LNdWB_register_Asm_32: 06729 case ARM::VLD3LNqWB_register_Asm_16: 06730 case ARM::VLD3LNqWB_register_Asm_32: { 06731 MCInst TmpInst; 06732 // Shuffle the operands around so the lane index operand is in the 06733 // right place. 06734 unsigned Spacing; 06735 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06736 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06737 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06738 Spacing)); 06739 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06740 Spacing * 2)); 06741 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06742 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06743 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06744 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06745 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06746 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06747 Spacing)); 06748 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06749 Spacing * 2)); 06750 TmpInst.addOperand(Inst.getOperand(1)); // lane 06751 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06752 TmpInst.addOperand(Inst.getOperand(6)); 06753 Inst = TmpInst; 06754 return true; 06755 } 06756 06757 case ARM::VLD4LNdWB_register_Asm_8: 06758 case ARM::VLD4LNdWB_register_Asm_16: 06759 case ARM::VLD4LNdWB_register_Asm_32: 06760 case ARM::VLD4LNqWB_register_Asm_16: 06761 case ARM::VLD4LNqWB_register_Asm_32: { 06762 MCInst TmpInst; 06763 // Shuffle the operands around so the lane index operand is in the 06764 // right place. 
06765 unsigned Spacing; 06766 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06767 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06768 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06769 Spacing)); 06770 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06771 Spacing * 2)); 06772 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06773 Spacing * 3)); 06774 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06775 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06776 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06777 TmpInst.addOperand(Inst.getOperand(4)); // Rm 06778 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06779 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06780 Spacing)); 06781 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06782 Spacing * 2)); 06783 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06784 Spacing * 3)); 06785 TmpInst.addOperand(Inst.getOperand(1)); // lane 06786 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 06787 TmpInst.addOperand(Inst.getOperand(6)); 06788 Inst = TmpInst; 06789 return true; 06790 } 06791 06792 case ARM::VLD1LNdWB_fixed_Asm_8: 06793 case ARM::VLD1LNdWB_fixed_Asm_16: 06794 case ARM::VLD1LNdWB_fixed_Asm_32: { 06795 MCInst TmpInst; 06796 // Shuffle the operands around so the lane index operand is in the 06797 // right place. 06798 unsigned Spacing; 06799 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06800 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06801 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06802 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06803 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06804 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06805 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06806 TmpInst.addOperand(Inst.getOperand(1)); // lane 06807 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06808 TmpInst.addOperand(Inst.getOperand(5)); 06809 Inst = TmpInst; 06810 return true; 06811 } 06812 06813 case ARM::VLD2LNdWB_fixed_Asm_8: 06814 case ARM::VLD2LNdWB_fixed_Asm_16: 06815 case ARM::VLD2LNdWB_fixed_Asm_32: 06816 case ARM::VLD2LNqWB_fixed_Asm_16: 06817 case ARM::VLD2LNqWB_fixed_Asm_32: { 06818 MCInst TmpInst; 06819 // Shuffle the operands around so the lane index operand is in the 06820 // right place. 
06821 unsigned Spacing; 06822 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06823 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06824 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06825 Spacing)); 06826 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06827 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06828 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06829 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06830 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06831 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06832 Spacing)); 06833 TmpInst.addOperand(Inst.getOperand(1)); // lane 06834 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06835 TmpInst.addOperand(Inst.getOperand(5)); 06836 Inst = TmpInst; 06837 return true; 06838 } 06839 06840 case ARM::VLD3LNdWB_fixed_Asm_8: 06841 case ARM::VLD3LNdWB_fixed_Asm_16: 06842 case ARM::VLD3LNdWB_fixed_Asm_32: 06843 case ARM::VLD3LNqWB_fixed_Asm_16: 06844 case ARM::VLD3LNqWB_fixed_Asm_32: { 06845 MCInst TmpInst; 06846 // Shuffle the operands around so the lane index operand is in the 06847 // right place. 06848 unsigned Spacing; 06849 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06850 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06851 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06852 Spacing)); 06853 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06854 Spacing * 2)); 06855 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06856 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06857 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06858 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06859 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06860 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06861 Spacing)); 06862 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06863 Spacing * 2)); 06864 TmpInst.addOperand(Inst.getOperand(1)); // lane 06865 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06866 TmpInst.addOperand(Inst.getOperand(5)); 06867 Inst = TmpInst; 06868 return true; 06869 } 06870 06871 case ARM::VLD4LNdWB_fixed_Asm_8: 06872 case ARM::VLD4LNdWB_fixed_Asm_16: 06873 case ARM::VLD4LNdWB_fixed_Asm_32: 06874 case ARM::VLD4LNqWB_fixed_Asm_16: 06875 case ARM::VLD4LNqWB_fixed_Asm_32: { 06876 MCInst TmpInst; 06877 // Shuffle the operands around so the lane index operand is in the 06878 // right place. 
06879 unsigned Spacing; 06880 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06881 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06882 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06883 Spacing)); 06884 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06885 Spacing * 2)); 06886 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06887 Spacing * 3)); 06888 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 06889 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06890 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06891 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 06892 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06893 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06894 Spacing)); 06895 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06896 Spacing * 2)); 06897 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06898 Spacing * 3)); 06899 TmpInst.addOperand(Inst.getOperand(1)); // lane 06900 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06901 TmpInst.addOperand(Inst.getOperand(5)); 06902 Inst = TmpInst; 06903 return true; 06904 } 06905 06906 case ARM::VLD1LNdAsm_8: 06907 case ARM::VLD1LNdAsm_16: 06908 case ARM::VLD1LNdAsm_32: { 06909 MCInst TmpInst; 06910 // Shuffle the operands around so the lane index operand is in the 06911 // right place. 06912 unsigned Spacing; 06913 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06914 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06915 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06916 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06917 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06918 TmpInst.addOperand(Inst.getOperand(1)); // lane 06919 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06920 TmpInst.addOperand(Inst.getOperand(5)); 06921 Inst = TmpInst; 06922 return true; 06923 } 06924 06925 case ARM::VLD2LNdAsm_8: 06926 case ARM::VLD2LNdAsm_16: 06927 case ARM::VLD2LNdAsm_32: 06928 case ARM::VLD2LNqAsm_16: 06929 case ARM::VLD2LNqAsm_32: { 06930 MCInst TmpInst; 06931 // Shuffle the operands around so the lane index operand is in the 06932 // right place. 06933 unsigned Spacing; 06934 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06935 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06936 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06937 Spacing)); 06938 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06939 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06940 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06941 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06942 Spacing)); 06943 TmpInst.addOperand(Inst.getOperand(1)); // lane 06944 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06945 TmpInst.addOperand(Inst.getOperand(5)); 06946 Inst = TmpInst; 06947 return true; 06948 } 06949 06950 case ARM::VLD3LNdAsm_8: 06951 case ARM::VLD3LNdAsm_16: 06952 case ARM::VLD3LNdAsm_32: 06953 case ARM::VLD3LNqAsm_16: 06954 case ARM::VLD3LNqAsm_32: { 06955 MCInst TmpInst; 06956 // Shuffle the operands around so the lane index operand is in the 06957 // right place. 
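    // e.g. 'vld3.8 {d0[1], d1[1], d2[1]}, [r4]'; with no writeback there is
    // neither an Rn_wb result nor an Rm operand to add here.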
06958 unsigned Spacing; 06959 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06960 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06961 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06962 Spacing)); 06963 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06964 Spacing * 2)); 06965 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06966 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06967 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06968 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06969 Spacing)); 06970 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06971 Spacing * 2)); 06972 TmpInst.addOperand(Inst.getOperand(1)); // lane 06973 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 06974 TmpInst.addOperand(Inst.getOperand(5)); 06975 Inst = TmpInst; 06976 return true; 06977 } 06978 06979 case ARM::VLD4LNdAsm_8: 06980 case ARM::VLD4LNdAsm_16: 06981 case ARM::VLD4LNdAsm_32: 06982 case ARM::VLD4LNqAsm_16: 06983 case ARM::VLD4LNqAsm_32: { 06984 MCInst TmpInst; 06985 // Shuffle the operands around so the lane index operand is in the 06986 // right place. 06987 unsigned Spacing; 06988 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 06989 TmpInst.addOperand(Inst.getOperand(0)); // Vd 06990 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06991 Spacing)); 06992 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06993 Spacing * 2)); 06994 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 06995 Spacing * 3)); 06996 TmpInst.addOperand(Inst.getOperand(2)); // Rn 06997 TmpInst.addOperand(Inst.getOperand(3)); // alignment 06998 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 06999 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07000 Spacing)); 07001 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07002 Spacing * 2)); 07003 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07004 Spacing * 3)); 07005 TmpInst.addOperand(Inst.getOperand(1)); // lane 07006 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07007 TmpInst.addOperand(Inst.getOperand(5)); 07008 Inst = TmpInst; 07009 return true; 07010 } 07011 07012 // VLD3DUP single 3-element structure to all lanes instructions. 
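  // e.g. 'vld3.8 {d0[], d1[], d2[]}, [r4]' or, with the double-spaced (q)
  // register lists, 'vld3.16 {d0[], d2[], d4[]}, [r4]'.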
07013 case ARM::VLD3DUPdAsm_8: 07014 case ARM::VLD3DUPdAsm_16: 07015 case ARM::VLD3DUPdAsm_32: 07016 case ARM::VLD3DUPqAsm_8: 07017 case ARM::VLD3DUPqAsm_16: 07018 case ARM::VLD3DUPqAsm_32: { 07019 MCInst TmpInst; 07020 unsigned Spacing; 07021 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07022 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07023 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07024 Spacing)); 07025 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07026 Spacing * 2)); 07027 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07028 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07029 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07030 TmpInst.addOperand(Inst.getOperand(4)); 07031 Inst = TmpInst; 07032 return true; 07033 } 07034 07035 case ARM::VLD3DUPdWB_fixed_Asm_8: 07036 case ARM::VLD3DUPdWB_fixed_Asm_16: 07037 case ARM::VLD3DUPdWB_fixed_Asm_32: 07038 case ARM::VLD3DUPqWB_fixed_Asm_8: 07039 case ARM::VLD3DUPqWB_fixed_Asm_16: 07040 case ARM::VLD3DUPqWB_fixed_Asm_32: { 07041 MCInst TmpInst; 07042 unsigned Spacing; 07043 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07044 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07045 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07046 Spacing)); 07047 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07048 Spacing * 2)); 07049 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07050 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07051 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07052 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 07053 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07054 TmpInst.addOperand(Inst.getOperand(4)); 07055 Inst = TmpInst; 07056 return true; 07057 } 07058 07059 case ARM::VLD3DUPdWB_register_Asm_8: 07060 case ARM::VLD3DUPdWB_register_Asm_16: 07061 case ARM::VLD3DUPdWB_register_Asm_32: 07062 case ARM::VLD3DUPqWB_register_Asm_8: 07063 case ARM::VLD3DUPqWB_register_Asm_16: 07064 case ARM::VLD3DUPqWB_register_Asm_32: { 07065 MCInst TmpInst; 07066 unsigned Spacing; 07067 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07068 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07069 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07070 Spacing)); 07071 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07072 Spacing * 2)); 07073 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07074 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07075 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07076 TmpInst.addOperand(Inst.getOperand(3)); // Rm 07077 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07078 TmpInst.addOperand(Inst.getOperand(5)); 07079 Inst = TmpInst; 07080 return true; 07081 } 07082 07083 // VLD3 multiple 3-element structure instructions. 
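  // e.g. 'vld3.8 {d0, d1, d2}, [r4]' or 'vld3.16 {d0, d2, d4}, [r4]!'.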
07084 case ARM::VLD3dAsm_8: 07085 case ARM::VLD3dAsm_16: 07086 case ARM::VLD3dAsm_32: 07087 case ARM::VLD3qAsm_8: 07088 case ARM::VLD3qAsm_16: 07089 case ARM::VLD3qAsm_32: { 07090 MCInst TmpInst; 07091 unsigned Spacing; 07092 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07093 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07094 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07095 Spacing)); 07096 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07097 Spacing * 2)); 07098 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07099 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07100 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07101 TmpInst.addOperand(Inst.getOperand(4)); 07102 Inst = TmpInst; 07103 return true; 07104 } 07105 07106 case ARM::VLD3dWB_fixed_Asm_8: 07107 case ARM::VLD3dWB_fixed_Asm_16: 07108 case ARM::VLD3dWB_fixed_Asm_32: 07109 case ARM::VLD3qWB_fixed_Asm_8: 07110 case ARM::VLD3qWB_fixed_Asm_16: 07111 case ARM::VLD3qWB_fixed_Asm_32: { 07112 MCInst TmpInst; 07113 unsigned Spacing; 07114 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07115 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07116 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07117 Spacing)); 07118 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07119 Spacing * 2)); 07120 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07121 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07122 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07123 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 07124 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07125 TmpInst.addOperand(Inst.getOperand(4)); 07126 Inst = TmpInst; 07127 return true; 07128 } 07129 07130 case ARM::VLD3dWB_register_Asm_8: 07131 case ARM::VLD3dWB_register_Asm_16: 07132 case ARM::VLD3dWB_register_Asm_32: 07133 case ARM::VLD3qWB_register_Asm_8: 07134 case ARM::VLD3qWB_register_Asm_16: 07135 case ARM::VLD3qWB_register_Asm_32: { 07136 MCInst TmpInst; 07137 unsigned Spacing; 07138 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07139 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07140 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07141 Spacing)); 07142 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07143 Spacing * 2)); 07144 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07145 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07146 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07147 TmpInst.addOperand(Inst.getOperand(3)); // Rm 07148 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07149 TmpInst.addOperand(Inst.getOperand(5)); 07150 Inst = TmpInst; 07151 return true; 07152 } 07153 07154 // VLD4DUP single 3-element structure to all lanes instructions. 
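  // e.g. 'vld4.8 {d0[], d1[], d2[], d3[]}, [r4]': one 4-element structure is
  // loaded and replicated to all lanes of the four destination registers.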
07155 case ARM::VLD4DUPdAsm_8: 07156 case ARM::VLD4DUPdAsm_16: 07157 case ARM::VLD4DUPdAsm_32: 07158 case ARM::VLD4DUPqAsm_8: 07159 case ARM::VLD4DUPqAsm_16: 07160 case ARM::VLD4DUPqAsm_32: { 07161 MCInst TmpInst; 07162 unsigned Spacing; 07163 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07164 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07165 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07166 Spacing)); 07167 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07168 Spacing * 2)); 07169 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07170 Spacing * 3)); 07171 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07172 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07173 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07174 TmpInst.addOperand(Inst.getOperand(4)); 07175 Inst = TmpInst; 07176 return true; 07177 } 07178 07179 case ARM::VLD4DUPdWB_fixed_Asm_8: 07180 case ARM::VLD4DUPdWB_fixed_Asm_16: 07181 case ARM::VLD4DUPdWB_fixed_Asm_32: 07182 case ARM::VLD4DUPqWB_fixed_Asm_8: 07183 case ARM::VLD4DUPqWB_fixed_Asm_16: 07184 case ARM::VLD4DUPqWB_fixed_Asm_32: { 07185 MCInst TmpInst; 07186 unsigned Spacing; 07187 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07188 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07189 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07190 Spacing)); 07191 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07192 Spacing * 2)); 07193 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07194 Spacing * 3)); 07195 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07196 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07197 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07198 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 07199 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07200 TmpInst.addOperand(Inst.getOperand(4)); 07201 Inst = TmpInst; 07202 return true; 07203 } 07204 07205 case ARM::VLD4DUPdWB_register_Asm_8: 07206 case ARM::VLD4DUPdWB_register_Asm_16: 07207 case ARM::VLD4DUPdWB_register_Asm_32: 07208 case ARM::VLD4DUPqWB_register_Asm_8: 07209 case ARM::VLD4DUPqWB_register_Asm_16: 07210 case ARM::VLD4DUPqWB_register_Asm_32: { 07211 MCInst TmpInst; 07212 unsigned Spacing; 07213 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07214 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07215 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07216 Spacing)); 07217 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07218 Spacing * 2)); 07219 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07220 Spacing * 3)); 07221 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07222 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07223 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07224 TmpInst.addOperand(Inst.getOperand(3)); // Rm 07225 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07226 TmpInst.addOperand(Inst.getOperand(5)); 07227 Inst = TmpInst; 07228 return true; 07229 } 07230 07231 // VLD4 multiple 4-element structure instructions. 
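  // e.g. 'vld4.8 {d0, d1, d2, d3}, [r4]' or 'vld4.16 {d0, d2, d4, d6}, [r4]!'.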
07232 case ARM::VLD4dAsm_8: 07233 case ARM::VLD4dAsm_16: 07234 case ARM::VLD4dAsm_32: 07235 case ARM::VLD4qAsm_8: 07236 case ARM::VLD4qAsm_16: 07237 case ARM::VLD4qAsm_32: { 07238 MCInst TmpInst; 07239 unsigned Spacing; 07240 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07241 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07242 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07243 Spacing)); 07244 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07245 Spacing * 2)); 07246 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07247 Spacing * 3)); 07248 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07249 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07250 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07251 TmpInst.addOperand(Inst.getOperand(4)); 07252 Inst = TmpInst; 07253 return true; 07254 } 07255 07256 case ARM::VLD4dWB_fixed_Asm_8: 07257 case ARM::VLD4dWB_fixed_Asm_16: 07258 case ARM::VLD4dWB_fixed_Asm_32: 07259 case ARM::VLD4qWB_fixed_Asm_8: 07260 case ARM::VLD4qWB_fixed_Asm_16: 07261 case ARM::VLD4qWB_fixed_Asm_32: { 07262 MCInst TmpInst; 07263 unsigned Spacing; 07264 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07265 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07266 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07267 Spacing)); 07268 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07269 Spacing * 2)); 07270 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07271 Spacing * 3)); 07272 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07273 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07274 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07275 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 07276 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07277 TmpInst.addOperand(Inst.getOperand(4)); 07278 Inst = TmpInst; 07279 return true; 07280 } 07281 07282 case ARM::VLD4dWB_register_Asm_8: 07283 case ARM::VLD4dWB_register_Asm_16: 07284 case ARM::VLD4dWB_register_Asm_32: 07285 case ARM::VLD4qWB_register_Asm_8: 07286 case ARM::VLD4qWB_register_Asm_16: 07287 case ARM::VLD4qWB_register_Asm_32: { 07288 MCInst TmpInst; 07289 unsigned Spacing; 07290 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 07291 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07292 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07293 Spacing)); 07294 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07295 Spacing * 2)); 07296 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07297 Spacing * 3)); 07298 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07299 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07300 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07301 TmpInst.addOperand(Inst.getOperand(3)); // Rm 07302 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07303 TmpInst.addOperand(Inst.getOperand(5)); 07304 Inst = TmpInst; 07305 return true; 07306 } 07307 07308 // VST3 multiple 3-element structure instructions. 
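  // e.g. 'vst3.8 {d0, d1, d2}, [r4]' or 'vst3.16 {d0, d2, d4}, [r4]!'.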
07309 case ARM::VST3dAsm_8:
07310 case ARM::VST3dAsm_16:
07311 case ARM::VST3dAsm_32:
07312 case ARM::VST3qAsm_8:
07313 case ARM::VST3qAsm_16:
07314 case ARM::VST3qAsm_32: {
07315 MCInst TmpInst;
07316 unsigned Spacing;
07317 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
07318 TmpInst.addOperand(Inst.getOperand(1)); // Rn
07319 TmpInst.addOperand(Inst.getOperand(2)); // alignment
07320 TmpInst.addOperand(Inst.getOperand(0)); // Vd
07321 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
07322 Spacing));
07323 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
07324 Spacing * 2));
07325 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
07326 TmpInst.addOperand(Inst.getOperand(4));
07327 Inst = TmpInst;
07328 return true;
07329 }
07330
07331 case ARM::VST3dWB_fixed_Asm_8:
07332 case ARM::VST3dWB_fixed_Asm_16:
07333 case ARM::VST3dWB_fixed_Asm_32:
07334 case ARM::VST3qWB_fixed_Asm_8:
07335 case ARM::VST3qWB_fixed_Asm_16:
07336 case ARM::VST3qWB_fixed_Asm_32: {
07337 MCInst TmpInst;
07338 unsigned Spacing;
07339 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
07340 TmpInst.addOperand(Inst.getOperand(1)); // Rn
07341 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
07342 TmpInst.addOperand(Inst.getOperand(2)); // alignment
07343 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
07344 TmpInst.addOperand(Inst.getOperand(0)); // Vd
07345 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
07346 Spacing));
07347 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
07348 Spacing * 2));
07349 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
07350 TmpInst.addOperand(Inst.getOperand(4));
07351 Inst = TmpInst;
07352 return true;
07353 }
07354
07355 case ARM::VST3dWB_register_Asm_8:
07356 case ARM::VST3dWB_register_Asm_16:
07357 case ARM::VST3dWB_register_Asm_32:
07358 case ARM::VST3qWB_register_Asm_8:
07359 case ARM::VST3qWB_register_Asm_16:
07360 case ARM::VST3qWB_register_Asm_32: {
07361 MCInst TmpInst;
07362 unsigned Spacing;
07363 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
07364 TmpInst.addOperand(Inst.getOperand(1)); // Rn
07365 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
07366 TmpInst.addOperand(Inst.getOperand(2)); // alignment
07367 TmpInst.addOperand(Inst.getOperand(3)); // Rm
07368 TmpInst.addOperand(Inst.getOperand(0)); // Vd
07369 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
07370 Spacing));
07371 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
07372 Spacing * 2));
07373 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
07374 TmpInst.addOperand(Inst.getOperand(5));
07375 Inst = TmpInst;
07376 return true;
07377 }
07378
07379 // VST4 multiple 4-element structure instructions.
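// Illustrative example (hypothetical input): the writeback form
// "vst4.32 {d0, d1, d2, d3}, [r2]!" should map to VST4dWB_fixed_Asm_32; its
// expansion below also adds the tied Rn_wb operand and a zero Rm register to
// stand for "no offset register".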
07380 case ARM::VST4dAsm_8: 07381 case ARM::VST4dAsm_16: 07382 case ARM::VST4dAsm_32: 07383 case ARM::VST4qAsm_8: 07384 case ARM::VST4qAsm_16: 07385 case ARM::VST4qAsm_32: { 07386 MCInst TmpInst; 07387 unsigned Spacing; 07388 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 07389 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07390 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07391 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07392 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07393 Spacing)); 07394 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07395 Spacing * 2)); 07396 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07397 Spacing * 3)); 07398 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07399 TmpInst.addOperand(Inst.getOperand(4)); 07400 Inst = TmpInst; 07401 return true; 07402 } 07403 07404 case ARM::VST4dWB_fixed_Asm_8: 07405 case ARM::VST4dWB_fixed_Asm_16: 07406 case ARM::VST4dWB_fixed_Asm_32: 07407 case ARM::VST4qWB_fixed_Asm_8: 07408 case ARM::VST4qWB_fixed_Asm_16: 07409 case ARM::VST4qWB_fixed_Asm_32: { 07410 MCInst TmpInst; 07411 unsigned Spacing; 07412 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 07413 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07414 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07415 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07416 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 07417 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07418 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07419 Spacing)); 07420 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07421 Spacing * 2)); 07422 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07423 Spacing * 3)); 07424 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07425 TmpInst.addOperand(Inst.getOperand(4)); 07426 Inst = TmpInst; 07427 return true; 07428 } 07429 07430 case ARM::VST4dWB_register_Asm_8: 07431 case ARM::VST4dWB_register_Asm_16: 07432 case ARM::VST4dWB_register_Asm_32: 07433 case ARM::VST4qWB_register_Asm_8: 07434 case ARM::VST4qWB_register_Asm_16: 07435 case ARM::VST4qWB_register_Asm_32: { 07436 MCInst TmpInst; 07437 unsigned Spacing; 07438 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 07439 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07440 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 07441 TmpInst.addOperand(Inst.getOperand(2)); // alignment 07442 TmpInst.addOperand(Inst.getOperand(3)); // Rm 07443 TmpInst.addOperand(Inst.getOperand(0)); // Vd 07444 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07445 Spacing)); 07446 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07447 Spacing * 2)); 07448 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 07449 Spacing * 3)); 07450 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07451 TmpInst.addOperand(Inst.getOperand(5)); 07452 Inst = TmpInst; 07453 return true; 07454 } 07455 07456 // Handle encoding choice for the shift-immediate instructions. 07457 case ARM::t2LSLri: 07458 case ARM::t2LSRri: 07459 case ARM::t2ASRri: { 07460 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 07461 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 07462 Inst.getOperand(5).getReg() == (inITBlock() ? 
0 : ARM::CPSR) && 07463 !(static_cast<ARMOperand &>(*Operands[3]).isToken() && 07464 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) { 07465 unsigned NewOpc; 07466 switch (Inst.getOpcode()) { 07467 default: llvm_unreachable("unexpected opcode"); 07468 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break; 07469 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break; 07470 case ARM::t2ASRri: NewOpc = ARM::tASRri; break; 07471 } 07472 // The Thumb1 operands aren't in the same order. Awesome, eh? 07473 MCInst TmpInst; 07474 TmpInst.setOpcode(NewOpc); 07475 TmpInst.addOperand(Inst.getOperand(0)); 07476 TmpInst.addOperand(Inst.getOperand(5)); 07477 TmpInst.addOperand(Inst.getOperand(1)); 07478 TmpInst.addOperand(Inst.getOperand(2)); 07479 TmpInst.addOperand(Inst.getOperand(3)); 07480 TmpInst.addOperand(Inst.getOperand(4)); 07481 Inst = TmpInst; 07482 return true; 07483 } 07484 return false; 07485 } 07486 07487 // Handle the Thumb2 mode MOV complex aliases. 07488 case ARM::t2MOVsr: 07489 case ARM::t2MOVSsr: { 07490 // Which instruction to expand to depends on the CCOut operand and 07491 // whether we're in an IT block if the register operands are low 07492 // registers. 07493 bool isNarrow = false; 07494 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 07495 isARMLowRegister(Inst.getOperand(1).getReg()) && 07496 isARMLowRegister(Inst.getOperand(2).getReg()) && 07497 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 07498 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr)) 07499 isNarrow = true; 07500 MCInst TmpInst; 07501 unsigned newOpc; 07502 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { 07503 default: llvm_unreachable("unexpected opcode!"); 07504 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break; 07505 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break; 07506 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; 07507 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; 07508 } 07509 TmpInst.setOpcode(newOpc); 07510 TmpInst.addOperand(Inst.getOperand(0)); // Rd 07511 if (isNarrow) 07512 TmpInst.addOperand(MCOperand::CreateReg( 07513 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 07514 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07515 TmpInst.addOperand(Inst.getOperand(2)); // Rm 07516 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 07517 TmpInst.addOperand(Inst.getOperand(5)); 07518 if (!isNarrow) 07519 TmpInst.addOperand(MCOperand::CreateReg( 07520 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 07521 Inst = TmpInst; 07522 return true; 07523 } 07524 case ARM::t2MOVsi: 07525 case ARM::t2MOVSsi: { 07526 // Which instruction to expand to depends on the CCOut operand and 07527 // whether we're in an IT block if the register operands are low 07528 // registers. 07529 bool isNarrow = false; 07530 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 07531 isARMLowRegister(Inst.getOperand(1).getReg()) && 07532 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi)) 07533 isNarrow = true; 07534 MCInst TmpInst; 07535 unsigned newOpc; 07536 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) { 07537 default: llvm_unreachable("unexpected opcode!"); 07538 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; 07539 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; 07540 case ARM_AM::lsl: newOpc = isNarrow ? 
ARM::tLSLri : ARM::t2LSLri; break; 07541 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; 07542 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; 07543 } 07544 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); 07545 if (Amount == 32) Amount = 0; 07546 TmpInst.setOpcode(newOpc); 07547 TmpInst.addOperand(Inst.getOperand(0)); // Rd 07548 if (isNarrow) 07549 TmpInst.addOperand(MCOperand::CreateReg( 07550 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 07551 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07552 if (newOpc != ARM::t2RRX) 07553 TmpInst.addOperand(MCOperand::CreateImm(Amount)); 07554 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07555 TmpInst.addOperand(Inst.getOperand(4)); 07556 if (!isNarrow) 07557 TmpInst.addOperand(MCOperand::CreateReg( 07558 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 07559 Inst = TmpInst; 07560 return true; 07561 } 07562 // Handle the ARM mode MOV complex aliases. 07563 case ARM::ASRr: 07564 case ARM::LSRr: 07565 case ARM::LSLr: 07566 case ARM::RORr: { 07567 ARM_AM::ShiftOpc ShiftTy; 07568 switch(Inst.getOpcode()) { 07569 default: llvm_unreachable("unexpected opcode!"); 07570 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 07571 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 07572 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 07573 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 07574 } 07575 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 07576 MCInst TmpInst; 07577 TmpInst.setOpcode(ARM::MOVsr); 07578 TmpInst.addOperand(Inst.getOperand(0)); // Rd 07579 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07580 TmpInst.addOperand(Inst.getOperand(2)); // Rm 07581 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 07582 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07583 TmpInst.addOperand(Inst.getOperand(4)); 07584 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 07585 Inst = TmpInst; 07586 return true; 07587 } 07588 case ARM::ASRi: 07589 case ARM::LSRi: 07590 case ARM::LSLi: 07591 case ARM::RORi: { 07592 ARM_AM::ShiftOpc ShiftTy; 07593 switch(Inst.getOpcode()) { 07594 default: llvm_unreachable("unexpected opcode!"); 07595 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 07596 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 07597 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 07598 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 07599 } 07600 // A shift by zero is a plain MOVr, not a MOVsi. 07601 unsigned Amt = Inst.getOperand(2).getImm(); 07602 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 07603 // A shift by 32 should be encoded as 0 when permitted 07604 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr)) 07605 Amt = 0; 07606 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 07607 MCInst TmpInst; 07608 TmpInst.setOpcode(Opc); 07609 TmpInst.addOperand(Inst.getOperand(0)); // Rd 07610 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07611 if (Opc == ARM::MOVsi) 07612 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 07613 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 07614 TmpInst.addOperand(Inst.getOperand(4)); 07615 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 07616 Inst = TmpInst; 07617 return true; 07618 } 07619 case ARM::RRXi: { 07620 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 07621 MCInst TmpInst; 07622 TmpInst.setOpcode(ARM::MOVsi); 07623 TmpInst.addOperand(Inst.getOperand(0)); // Rd 07624 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07625 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 07626 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 07627 TmpInst.addOperand(Inst.getOperand(3)); 07628 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 07629 Inst = TmpInst; 07630 return true; 07631 } 07632 case ARM::t2LDMIA_UPD: { 07633 // If this is a load of a single register, then we should use 07634 // a post-indexed LDR instruction instead, per the ARM ARM. 07635 if (Inst.getNumOperands() != 5) 07636 return false; 07637 MCInst TmpInst; 07638 TmpInst.setOpcode(ARM::t2LDR_POST); 07639 TmpInst.addOperand(Inst.getOperand(4)); // Rt 07640 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 07641 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07642 TmpInst.addOperand(MCOperand::CreateImm(4)); 07643 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 07644 TmpInst.addOperand(Inst.getOperand(3)); 07645 Inst = TmpInst; 07646 return true; 07647 } 07648 case ARM::t2STMDB_UPD: { 07649 // If this is a store of a single register, then we should use 07650 // a pre-indexed STR instruction instead, per the ARM ARM. 07651 if (Inst.getNumOperands() != 5) 07652 return false; 07653 MCInst TmpInst; 07654 TmpInst.setOpcode(ARM::t2STR_PRE); 07655 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 07656 TmpInst.addOperand(Inst.getOperand(4)); // Rt 07657 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07658 TmpInst.addOperand(MCOperand::CreateImm(-4)); 07659 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 07660 TmpInst.addOperand(Inst.getOperand(3)); 07661 Inst = TmpInst; 07662 return true; 07663 } 07664 case ARM::LDMIA_UPD: 07665 // If this is a load of a single register via a 'pop', then we should use 07666 // a post-indexed LDR instruction instead, per the ARM ARM. 07667 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" && 07668 Inst.getNumOperands() == 5) { 07669 MCInst TmpInst; 07670 TmpInst.setOpcode(ARM::LDR_POST_IMM); 07671 TmpInst.addOperand(Inst.getOperand(4)); // Rt 07672 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 07673 TmpInst.addOperand(Inst.getOperand(1)); // Rn 07674 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 07675 TmpInst.addOperand(MCOperand::CreateImm(4)); 07676 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 07677 TmpInst.addOperand(Inst.getOperand(3)); 07678 Inst = TmpInst; 07679 return true; 07680 } 07681 break; 07682 case ARM::STMDB_UPD: 07683 // If this is a store of a single register via a 'push', then we should use 07684 // a pre-indexed STR instruction instead, per the ARM ARM. 
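// Illustrative example (hypothetical input): a single-register "push {r4}"
// should be emitted as "str r4, [sp, #-4]!", i.e. the pre-indexed STR_PRE_IMM
// form built below.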
07685 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" && 07686 Inst.getNumOperands() == 5) { 07687 MCInst TmpInst; 07688 TmpInst.setOpcode(ARM::STR_PRE_IMM); 07689 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 07690 TmpInst.addOperand(Inst.getOperand(4)); // Rt 07691 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 07692 TmpInst.addOperand(MCOperand::CreateImm(-4)); 07693 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 07694 TmpInst.addOperand(Inst.getOperand(3)); 07695 Inst = TmpInst; 07696 } 07697 break; 07698 case ARM::t2ADDri12: 07699 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" 07700 // mnemonic was used (not "addw"), encoding T3 is preferred. 07701 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" || 07702 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 07703 break; 07704 Inst.setOpcode(ARM::t2ADDri); 07705 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 07706 break; 07707 case ARM::t2SUBri12: 07708 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" 07709 // mnemonic was used (not "subw"), encoding T3 is preferred. 07710 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" || 07711 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 07712 break; 07713 Inst.setOpcode(ARM::t2SUBri); 07714 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 07715 break; 07716 case ARM::tADDi8: 07717 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 07718 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 07719 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 07720 // to encoding T1 if <Rd> is omitted." 07721 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 07722 Inst.setOpcode(ARM::tADDi3); 07723 return true; 07724 } 07725 break; 07726 case ARM::tSUBi8: 07727 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 07728 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 07729 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 07730 // to encoding T1 if <Rd> is omitted." 07731 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 07732 Inst.setOpcode(ARM::tSUBi3); 07733 return true; 07734 } 07735 break; 07736 case ARM::t2ADDri: 07737 case ARM::t2SUBri: { 07738 // If the destination and first source operand are the same, and 07739 // the flags are compatible with the current IT status, use encoding T2 07740 // instead of T3. For compatibility with the system 'as'. Make sure the 07741 // wide encoding wasn't explicit. 07742 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 07743 !isARMLowRegister(Inst.getOperand(0).getReg()) || 07744 (unsigned)Inst.getOperand(2).getImm() > 255 || 07745 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) || 07746 (inITBlock() && Inst.getOperand(5).getReg() != 0)) || 07747 (static_cast<ARMOperand &>(*Operands[3]).isToken() && 07748 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) 07749 break; 07750 MCInst TmpInst; 07751 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? 
07752 ARM::tADDi8 : ARM::tSUBi8); 07753 TmpInst.addOperand(Inst.getOperand(0)); 07754 TmpInst.addOperand(Inst.getOperand(5)); 07755 TmpInst.addOperand(Inst.getOperand(0)); 07756 TmpInst.addOperand(Inst.getOperand(2)); 07757 TmpInst.addOperand(Inst.getOperand(3)); 07758 TmpInst.addOperand(Inst.getOperand(4)); 07759 Inst = TmpInst; 07760 return true; 07761 } 07762 case ARM::t2ADDrr: { 07763 // If the destination and first source operand are the same, and 07764 // there's no setting of the flags, use encoding T2 instead of T3. 07765 // Note that this is only for ADD, not SUB. This mirrors the system 07766 // 'as' behaviour. Make sure the wide encoding wasn't explicit. 07767 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 07768 Inst.getOperand(5).getReg() != 0 || 07769 (static_cast<ARMOperand &>(*Operands[3]).isToken() && 07770 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) 07771 break; 07772 MCInst TmpInst; 07773 TmpInst.setOpcode(ARM::tADDhirr); 07774 TmpInst.addOperand(Inst.getOperand(0)); 07775 TmpInst.addOperand(Inst.getOperand(0)); 07776 TmpInst.addOperand(Inst.getOperand(2)); 07777 TmpInst.addOperand(Inst.getOperand(3)); 07778 TmpInst.addOperand(Inst.getOperand(4)); 07779 Inst = TmpInst; 07780 return true; 07781 } 07782 case ARM::tADDrSP: { 07783 // If the non-SP source operand and the destination operand are not the 07784 // same, we need to use the 32-bit encoding if it's available. 07785 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 07786 Inst.setOpcode(ARM::t2ADDrr); 07787 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 07788 return true; 07789 } 07790 break; 07791 } 07792 case ARM::tB: 07793 // A Thumb conditional branch outside of an IT block is a tBcc. 07794 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 07795 Inst.setOpcode(ARM::tBcc); 07796 return true; 07797 } 07798 break; 07799 case ARM::t2B: 07800 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 07801 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 07802 Inst.setOpcode(ARM::t2Bcc); 07803 return true; 07804 } 07805 break; 07806 case ARM::t2Bcc: 07807 // If the conditional is AL or we're in an IT block, we really want t2B. 07808 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 07809 Inst.setOpcode(ARM::t2B); 07810 return true; 07811 } 07812 break; 07813 case ARM::tBcc: 07814 // If the conditional is AL, we really want tB. 07815 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 07816 Inst.setOpcode(ARM::tB); 07817 return true; 07818 } 07819 break; 07820 case ARM::tLDMIA: { 07821 // If the register list contains any high registers, or if the writeback 07822 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 07823 // instead if we're in Thumb2. Otherwise, this should have generated 07824 // an error in validateInstruction(). 07825 unsigned Rn = Inst.getOperand(0).getReg(); 07826 bool hasWritebackToken = 07827 (static_cast<ARMOperand &>(*Operands[3]).isToken() && 07828 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); 07829 bool listContainsBase; 07830 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 07831 (!listContainsBase && !hasWritebackToken) || 07832 (listContainsBase && hasWritebackToken)) { 07833 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 07834 assert (isThumbTwo()); 07835 Inst.setOpcode(hasWritebackToken ? 
ARM::t2LDMIA_UPD : ARM::t2LDMIA); 07836 // If we're switching to the updating version, we need to insert 07837 // the writeback tied operand. 07838 if (hasWritebackToken) 07839 Inst.insert(Inst.begin(), 07840 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 07841 return true; 07842 } 07843 break; 07844 } 07845 case ARM::tSTMIA_UPD: { 07846 // If the register list contains any high registers, we need to use 07847 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 07848 // should have generated an error in validateInstruction(). 07849 unsigned Rn = Inst.getOperand(0).getReg(); 07850 bool listContainsBase; 07851 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 07852 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 07853 assert (isThumbTwo()); 07854 Inst.setOpcode(ARM::t2STMIA_UPD); 07855 return true; 07856 } 07857 break; 07858 } 07859 case ARM::tPOP: { 07860 bool listContainsBase; 07861 // If the register list contains any high registers, we need to use 07862 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 07863 // should have generated an error in validateInstruction(). 07864 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 07865 return false; 07866 assert (isThumbTwo()); 07867 Inst.setOpcode(ARM::t2LDMIA_UPD); 07868 // Add the base register and writeback operands. 07869 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 07870 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 07871 return true; 07872 } 07873 case ARM::tPUSH: { 07874 bool listContainsBase; 07875 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 07876 return false; 07877 assert (isThumbTwo()); 07878 Inst.setOpcode(ARM::t2STMDB_UPD); 07879 // Add the base register and writeback operands. 07880 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 07881 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 07882 return true; 07883 } 07884 case ARM::t2MOVi: { 07885 // If we can use the 16-bit encoding and the user didn't explicitly 07886 // request the 32-bit variant, transform it here. 07887 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 07888 (unsigned)Inst.getOperand(1).getImm() <= 255 && 07889 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 07890 Inst.getOperand(4).getReg() == ARM::CPSR) || 07891 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 07892 (!static_cast<ARMOperand &>(*Operands[2]).isToken() || 07893 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) { 07894 // The operands aren't in the same order for tMOVi8... 07895 MCInst TmpInst; 07896 TmpInst.setOpcode(ARM::tMOVi8); 07897 TmpInst.addOperand(Inst.getOperand(0)); 07898 TmpInst.addOperand(Inst.getOperand(4)); 07899 TmpInst.addOperand(Inst.getOperand(1)); 07900 TmpInst.addOperand(Inst.getOperand(2)); 07901 TmpInst.addOperand(Inst.getOperand(3)); 07902 Inst = TmpInst; 07903 return true; 07904 } 07905 break; 07906 } 07907 case ARM::t2MOVr: { 07908 // If we can use the 16-bit encoding and the user didn't explicitly 07909 // request the 32-bit variant, transform it here. 07910 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 07911 isARMLowRegister(Inst.getOperand(1).getReg()) && 07912 Inst.getOperand(2).getImm() == ARMCC::AL && 07913 Inst.getOperand(4).getReg() == ARM::CPSR && 07914 (!static_cast<ARMOperand &>(*Operands[2]).isToken() || 07915 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) { 07916 // The operands aren't the same for tMOV[S]r... 
(no cc_out) 07917 MCInst TmpInst; 07918 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 07919 TmpInst.addOperand(Inst.getOperand(0)); 07920 TmpInst.addOperand(Inst.getOperand(1)); 07921 TmpInst.addOperand(Inst.getOperand(2)); 07922 TmpInst.addOperand(Inst.getOperand(3)); 07923 Inst = TmpInst; 07924 return true; 07925 } 07926 break; 07927 } 07928 case ARM::t2SXTH: 07929 case ARM::t2SXTB: 07930 case ARM::t2UXTH: 07931 case ARM::t2UXTB: { 07932 // If we can use the 16-bit encoding and the user didn't explicitly 07933 // request the 32-bit variant, transform it here. 07934 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 07935 isARMLowRegister(Inst.getOperand(1).getReg()) && 07936 Inst.getOperand(2).getImm() == 0 && 07937 (!static_cast<ARMOperand &>(*Operands[2]).isToken() || 07938 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) { 07939 unsigned NewOpc; 07940 switch (Inst.getOpcode()) { 07941 default: llvm_unreachable("Illegal opcode!"); 07942 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 07943 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 07944 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 07945 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 07946 } 07947 // The operands aren't the same for thumb1 (no rotate operand). 07948 MCInst TmpInst; 07949 TmpInst.setOpcode(NewOpc); 07950 TmpInst.addOperand(Inst.getOperand(0)); 07951 TmpInst.addOperand(Inst.getOperand(1)); 07952 TmpInst.addOperand(Inst.getOperand(3)); 07953 TmpInst.addOperand(Inst.getOperand(4)); 07954 Inst = TmpInst; 07955 return true; 07956 } 07957 break; 07958 } 07959 case ARM::MOVsi: { 07960 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); 07961 // rrx shifts and asr/lsr of #32 is encoded as 0 07962 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr) 07963 return false; 07964 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) { 07965 // Shifting by zero is accepted as a vanilla 'MOVr' 07966 MCInst TmpInst; 07967 TmpInst.setOpcode(ARM::MOVr); 07968 TmpInst.addOperand(Inst.getOperand(0)); 07969 TmpInst.addOperand(Inst.getOperand(1)); 07970 TmpInst.addOperand(Inst.getOperand(3)); 07971 TmpInst.addOperand(Inst.getOperand(4)); 07972 TmpInst.addOperand(Inst.getOperand(5)); 07973 Inst = TmpInst; 07974 return true; 07975 } 07976 return false; 07977 } 07978 case ARM::ANDrsi: 07979 case ARM::ORRrsi: 07980 case ARM::EORrsi: 07981 case ARM::BICrsi: 07982 case ARM::SUBrsi: 07983 case ARM::ADDrsi: { 07984 unsigned newOpc; 07985 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm()); 07986 if (SOpc == ARM_AM::rrx) return false; 07987 switch (Inst.getOpcode()) { 07988 default: llvm_unreachable("unexpected opcode!"); 07989 case ARM::ANDrsi: newOpc = ARM::ANDrr; break; 07990 case ARM::ORRrsi: newOpc = ARM::ORRrr; break; 07991 case ARM::EORrsi: newOpc = ARM::EORrr; break; 07992 case ARM::BICrsi: newOpc = ARM::BICrr; break; 07993 case ARM::SUBrsi: newOpc = ARM::SUBrr; break; 07994 case ARM::ADDrsi: newOpc = ARM::ADDrr; break; 07995 } 07996 // If the shift is by zero, use the non-shifted instruction definition. 
07997 // The exception is for right shifts, where 0 == 32 07998 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 && 07999 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) { 08000 MCInst TmpInst; 08001 TmpInst.setOpcode(newOpc); 08002 TmpInst.addOperand(Inst.getOperand(0)); 08003 TmpInst.addOperand(Inst.getOperand(1)); 08004 TmpInst.addOperand(Inst.getOperand(2)); 08005 TmpInst.addOperand(Inst.getOperand(4)); 08006 TmpInst.addOperand(Inst.getOperand(5)); 08007 TmpInst.addOperand(Inst.getOperand(6)); 08008 Inst = TmpInst; 08009 return true; 08010 } 08011 return false; 08012 } 08013 case ARM::ITasm: 08014 case ARM::t2IT: { 08015 // The mask bits for all but the first condition are represented as 08016 // the low bit of the condition code value implies 't'. We currently 08017 // always have 1 implies 't', so XOR toggle the bits if the low bit 08018 // of the condition code is zero. 08019 MCOperand &MO = Inst.getOperand(1); 08020 unsigned Mask = MO.getImm(); 08021 unsigned OrigMask = Mask; 08022 unsigned TZ = countTrailingZeros(Mask); 08023 if ((Inst.getOperand(0).getImm() & 1) == 0) { 08024 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 08025 Mask ^= (0xE << TZ) & 0xF; 08026 } 08027 MO.setImm(Mask); 08028 08029 // Set up the IT block state according to the IT instruction we just 08030 // matched. 08031 assert(!inITBlock() && "nested IT blocks?!"); 08032 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 08033 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 08034 ITState.CurPosition = 0; 08035 ITState.FirstCond = true; 08036 break; 08037 } 08038 case ARM::t2LSLrr: 08039 case ARM::t2LSRrr: 08040 case ARM::t2ASRrr: 08041 case ARM::t2SBCrr: 08042 case ARM::t2RORrr: 08043 case ARM::t2BICrr: 08044 { 08045 // Assemblers should use the narrow encodings of these instructions when permissible. 08046 if ((isARMLowRegister(Inst.getOperand(1).getReg()) && 08047 isARMLowRegister(Inst.getOperand(2).getReg())) && 08048 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 08049 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || 08050 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && 08051 (!static_cast<ARMOperand &>(*Operands[3]).isToken() || 08052 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower( 08053 ".w"))) { 08054 unsigned NewOpc; 08055 switch (Inst.getOpcode()) { 08056 default: llvm_unreachable("unexpected opcode"); 08057 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break; 08058 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break; 08059 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break; 08060 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break; 08061 case ARM::t2RORrr: NewOpc = ARM::tROR; break; 08062 case ARM::t2BICrr: NewOpc = ARM::tBIC; break; 08063 } 08064 MCInst TmpInst; 08065 TmpInst.setOpcode(NewOpc); 08066 TmpInst.addOperand(Inst.getOperand(0)); 08067 TmpInst.addOperand(Inst.getOperand(5)); 08068 TmpInst.addOperand(Inst.getOperand(1)); 08069 TmpInst.addOperand(Inst.getOperand(2)); 08070 TmpInst.addOperand(Inst.getOperand(3)); 08071 TmpInst.addOperand(Inst.getOperand(4)); 08072 Inst = TmpInst; 08073 return true; 08074 } 08075 return false; 08076 } 08077 case ARM::t2ANDrr: 08078 case ARM::t2EORrr: 08079 case ARM::t2ADCrr: 08080 case ARM::t2ORRrr: 08081 { 08082 // Assemblers should use the narrow encodings of these instructions when permissible. 08083 // These instructions are special in that they are commutable, so shorter encodings 08084 // are available more often. 
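// Illustrative example (hypothetical input): in "ands r0, r1, r0" the
// destination matches the second source rather than the first, so the operands
// are swapped below and the 16-bit tAND encoding can still be used, the
// operation being commutative.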
08085 if ((isARMLowRegister(Inst.getOperand(1).getReg()) && 08086 isARMLowRegister(Inst.getOperand(2).getReg())) && 08087 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() || 08088 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) && 08089 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || 08090 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && 08091 (!static_cast<ARMOperand &>(*Operands[3]).isToken() || 08092 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower( 08093 ".w"))) { 08094 unsigned NewOpc; 08095 switch (Inst.getOpcode()) { 08096 default: llvm_unreachable("unexpected opcode"); 08097 case ARM::t2ADCrr: NewOpc = ARM::tADC; break; 08098 case ARM::t2ANDrr: NewOpc = ARM::tAND; break; 08099 case ARM::t2EORrr: NewOpc = ARM::tEOR; break; 08100 case ARM::t2ORRrr: NewOpc = ARM::tORR; break; 08101 } 08102 MCInst TmpInst; 08103 TmpInst.setOpcode(NewOpc); 08104 TmpInst.addOperand(Inst.getOperand(0)); 08105 TmpInst.addOperand(Inst.getOperand(5)); 08106 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) { 08107 TmpInst.addOperand(Inst.getOperand(1)); 08108 TmpInst.addOperand(Inst.getOperand(2)); 08109 } else { 08110 TmpInst.addOperand(Inst.getOperand(2)); 08111 TmpInst.addOperand(Inst.getOperand(1)); 08112 } 08113 TmpInst.addOperand(Inst.getOperand(3)); 08114 TmpInst.addOperand(Inst.getOperand(4)); 08115 Inst = TmpInst; 08116 return true; 08117 } 08118 return false; 08119 } 08120 } 08121 return false; 08122 } 08123 08124 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 08125 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 08126 // suffix depending on whether they're in an IT block or not. 08127 unsigned Opc = Inst.getOpcode(); 08128 const MCInstrDesc &MCID = MII.get(Opc); 08129 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 08130 assert(MCID.hasOptionalDef() && 08131 "optionally flag setting instruction missing optional def operand"); 08132 assert(MCID.NumOperands == Inst.getNumOperands() && 08133 "operand count mismatch!"); 08134 // Find the optional-def operand (cc_out). 08135 unsigned OpNo; 08136 for (OpNo = 0; 08137 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 08138 ++OpNo) 08139 ; 08140 // If we're parsing Thumb1, reject it completely. 08141 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 08142 return Match_MnemonicFail; 08143 // If we're parsing Thumb2, which form is legal depends on whether we're 08144 // in an IT block. 08145 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 08146 !inITBlock()) 08147 return Match_RequiresITBlock; 08148 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 08149 inITBlock()) 08150 return Match_RequiresNotITBlock; 08151 } 08152 // Some high-register supporting Thumb1 encodings only allow both registers 08153 // to be from r0-r7 when in Thumb2. 08154 else if (Opc == ARM::tADDhirr && isThumbOne() && 08155 isARMLowRegister(Inst.getOperand(1).getReg()) && 08156 isARMLowRegister(Inst.getOperand(2).getReg())) 08157 return Match_RequiresThumb2; 08158 // Others only require ARMv6 or later. 
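// Illustrative example (hypothetical input): a plain "mov r0, r1" between low
// registers without setting flags relies on an encoding that Thumb only gained
// in ARMv6, hence the Match_RequiresV6 result below.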
08159 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 08160 isARMLowRegister(Inst.getOperand(0).getReg()) && 08161 isARMLowRegister(Inst.getOperand(1).getReg())) 08162 return Match_RequiresV6; 08163 return Match_Success; 08164 } 08165 08166 namespace llvm { 08167 template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) { 08168 return true; // In an assembly source, no need to second-guess 08169 } 08170 } 08171 08172 static const char *getSubtargetFeatureName(uint64_t Val); 08173 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, 08174 OperandVector &Operands, 08175 MCStreamer &Out, uint64_t &ErrorInfo, 08176 bool MatchingInlineAsm) { 08177 MCInst Inst; 08178 unsigned MatchResult; 08179 08180 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, 08181 MatchingInlineAsm); 08182 switch (MatchResult) { 08183 default: break; 08184 case Match_Success: 08185 // Context sensitive operand constraints aren't handled by the matcher, 08186 // so check them here. 08187 if (validateInstruction(Inst, Operands)) { 08188 // Still progress the IT block, otherwise one wrong condition causes 08189 // nasty cascading errors. 08190 forwardITPosition(); 08191 return true; 08192 } 08193 08194 { // processInstruction() updates inITBlock state, we need to save it away 08195 bool wasInITBlock = inITBlock(); 08196 08197 // Some instructions need post-processing to, for example, tweak which 08198 // encoding is selected. Loop on it while changes happen so the 08199 // individual transformations can chain off each other. E.g., 08200 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 08201 while (processInstruction(Inst, Operands)) 08202 ; 08203 08204 // Only after the instruction is fully processed, we can validate it 08205 if (wasInITBlock && hasV8Ops() && isThumb() && 08206 !isV8EligibleForIT(&Inst)) { 08207 Warning(IDLoc, "deprecated instruction in IT block"); 08208 } 08209 } 08210 08211 // Only move forward at the very end so that everything in validate 08212 // and process gets a consistent answer about whether we're in an IT 08213 // block. 08214 forwardITPosition(); 08215 08216 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and 08217 // doesn't actually encode. 08218 if (Inst.getOpcode() == ARM::ITasm) 08219 return false; 08220 08221 Inst.setLoc(IDLoc); 08222 Out.EmitInstruction(Inst, STI); 08223 return false; 08224 case Match_MissingFeature: { 08225 assert(ErrorInfo && "Unknown missing feature!"); 08226 // Special case the error message for the very common case where only 08227 // a single subtarget feature is missing (Thumb vs. ARM, e.g.). 
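// Illustrative note: the resulting diagnostic reads roughly
// "instruction requires: <feature> ...", with one name appended per missing
// feature bit reported back through ErrorInfo.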
08228 std::string Msg = "instruction requires:"; 08229 uint64_t Mask = 1; 08230 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) { 08231 if (ErrorInfo & Mask) { 08232 Msg += " "; 08233 Msg += getSubtargetFeatureName(ErrorInfo & Mask); 08234 } 08235 Mask <<= 1; 08236 } 08237 return Error(IDLoc, Msg); 08238 } 08239 case Match_InvalidOperand: { 08240 SMLoc ErrorLoc = IDLoc; 08241 if (ErrorInfo != ~0ULL) { 08242 if (ErrorInfo >= Operands.size()) 08243 return Error(IDLoc, "too few operands for instruction"); 08244 08245 ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); 08246 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 08247 } 08248 08249 return Error(ErrorLoc, "invalid operand for instruction"); 08250 } 08251 case Match_MnemonicFail: 08252 return Error(IDLoc, "invalid instruction", 08253 ((ARMOperand &)*Operands[0]).getLocRange()); 08254 case Match_RequiresNotITBlock: 08255 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 08256 case Match_RequiresITBlock: 08257 return Error(IDLoc, "instruction only valid inside IT block"); 08258 case Match_RequiresV6: 08259 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 08260 case Match_RequiresThumb2: 08261 return Error(IDLoc, "instruction variant requires Thumb2"); 08262 case Match_ImmRange0_15: { 08263 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); 08264 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 08265 return Error(ErrorLoc, "immediate operand must be in the range [0,15]"); 08266 } 08267 case Match_ImmRange0_239: { 08268 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); 08269 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 08270 return Error(ErrorLoc, "immediate operand must be in the range [0,239]"); 08271 } 08272 case Match_AlignedMemoryRequiresNone: 08273 case Match_DupAlignedMemoryRequiresNone: 08274 case Match_AlignedMemoryRequires16: 08275 case Match_DupAlignedMemoryRequires16: 08276 case Match_AlignedMemoryRequires32: 08277 case Match_DupAlignedMemoryRequires32: 08278 case Match_AlignedMemoryRequires64: 08279 case Match_DupAlignedMemoryRequires64: 08280 case Match_AlignedMemoryRequires64or128: 08281 case Match_DupAlignedMemoryRequires64or128: 08282 case Match_AlignedMemoryRequires64or128or256: 08283 { 08284 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc(); 08285 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 08286 switch (MatchResult) { 08287 default: 08288 llvm_unreachable("Missing Match_Aligned type"); 08289 case Match_AlignedMemoryRequiresNone: 08290 case Match_DupAlignedMemoryRequiresNone: 08291 return Error(ErrorLoc, "alignment must be omitted"); 08292 case Match_AlignedMemoryRequires16: 08293 case Match_DupAlignedMemoryRequires16: 08294 return Error(ErrorLoc, "alignment must be 16 or omitted"); 08295 case Match_AlignedMemoryRequires32: 08296 case Match_DupAlignedMemoryRequires32: 08297 return Error(ErrorLoc, "alignment must be 32 or omitted"); 08298 case Match_AlignedMemoryRequires64: 08299 case Match_DupAlignedMemoryRequires64: 08300 return Error(ErrorLoc, "alignment must be 64 or omitted"); 08301 case Match_AlignedMemoryRequires64or128: 08302 case Match_DupAlignedMemoryRequires64or128: 08303 return Error(ErrorLoc, "alignment must be 64, 128 or omitted"); 08304 case Match_AlignedMemoryRequires64or128or256: 08305 return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted"); 08306 } 08307 } 08308 } 08309 08310 llvm_unreachable("Implement any new match types added!"); 08311 } 08312 08313 /// parseDirective parses the arm 
specific directives 08314 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 08315 const MCObjectFileInfo::Environment Format = 08316 getContext().getObjectFileInfo()->getObjectFileType(); 08317 bool IsMachO = Format == MCObjectFileInfo::IsMachO; 08318 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF; 08319 08320 StringRef IDVal = DirectiveID.getIdentifier(); 08321 if (IDVal == ".word") 08322 return parseLiteralValues(4, DirectiveID.getLoc()); 08323 else if (IDVal == ".short" || IDVal == ".hword") 08324 return parseLiteralValues(2, DirectiveID.getLoc()); 08325 else if (IDVal == ".thumb") 08326 return parseDirectiveThumb(DirectiveID.getLoc()); 08327 else if (IDVal == ".arm") 08328 return parseDirectiveARM(DirectiveID.getLoc()); 08329 else if (IDVal == ".thumb_func") 08330 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 08331 else if (IDVal == ".code") 08332 return parseDirectiveCode(DirectiveID.getLoc()); 08333 else if (IDVal == ".syntax") 08334 return parseDirectiveSyntax(DirectiveID.getLoc()); 08335 else if (IDVal == ".unreq") 08336 return parseDirectiveUnreq(DirectiveID.getLoc()); 08337 else if (IDVal == ".fnend") 08338 return parseDirectiveFnEnd(DirectiveID.getLoc()); 08339 else if (IDVal == ".cantunwind") 08340 return parseDirectiveCantUnwind(DirectiveID.getLoc()); 08341 else if (IDVal == ".personality") 08342 return parseDirectivePersonality(DirectiveID.getLoc()); 08343 else if (IDVal == ".handlerdata") 08344 return parseDirectiveHandlerData(DirectiveID.getLoc()); 08345 else if (IDVal == ".setfp") 08346 return parseDirectiveSetFP(DirectiveID.getLoc()); 08347 else if (IDVal == ".pad") 08348 return parseDirectivePad(DirectiveID.getLoc()); 08349 else if (IDVal == ".save") 08350 return parseDirectiveRegSave(DirectiveID.getLoc(), false); 08351 else if (IDVal == ".vsave") 08352 return parseDirectiveRegSave(DirectiveID.getLoc(), true); 08353 else if (IDVal == ".ltorg" || IDVal == ".pool") 08354 return parseDirectiveLtorg(DirectiveID.getLoc()); 08355 else if (IDVal == ".even") 08356 return parseDirectiveEven(DirectiveID.getLoc()); 08357 else if (IDVal == ".personalityindex") 08358 return parseDirectivePersonalityIndex(DirectiveID.getLoc()); 08359 else if (IDVal == ".unwind_raw") 08360 return parseDirectiveUnwindRaw(DirectiveID.getLoc()); 08361 else if (IDVal == ".movsp") 08362 return parseDirectiveMovSP(DirectiveID.getLoc()); 08363 else if (IDVal == ".arch_extension") 08364 return parseDirectiveArchExtension(DirectiveID.getLoc()); 08365 else if (IDVal == ".align") 08366 return parseDirectiveAlign(DirectiveID.getLoc()); 08367 else if (IDVal == ".thumb_set") 08368 return parseDirectiveThumbSet(DirectiveID.getLoc()); 08369 08370 if (!IsMachO && !IsCOFF) { 08371 if (IDVal == ".arch") 08372 return parseDirectiveArch(DirectiveID.getLoc()); 08373 else if (IDVal == ".cpu") 08374 return parseDirectiveCPU(DirectiveID.getLoc()); 08375 else if (IDVal == ".eabi_attribute") 08376 return parseDirectiveEabiAttr(DirectiveID.getLoc()); 08377 else if (IDVal == ".fpu") 08378 return parseDirectiveFPU(DirectiveID.getLoc()); 08379 else if (IDVal == ".fnstart") 08380 return parseDirectiveFnStart(DirectiveID.getLoc()); 08381 else if (IDVal == ".inst") 08382 return parseDirectiveInst(DirectiveID.getLoc()); 08383 else if (IDVal == ".inst.n") 08384 return parseDirectiveInst(DirectiveID.getLoc(), 'n'); 08385 else if (IDVal == ".inst.w") 08386 return parseDirectiveInst(DirectiveID.getLoc(), 'w'); 08387 else if (IDVal == ".object_arch") 08388 return parseDirectiveObjectArch(DirectiveID.getLoc()); 08389 else if 
(IDVal == ".tlsdescseq") 08390 return parseDirectiveTLSDescSeq(DirectiveID.getLoc()); 08391 } 08392 08393 return true; 08394 } 08395 08396 /// parseLiteralValues 08397 /// ::= .hword expression [, expression]* 08398 /// ::= .short expression [, expression]* 08399 /// ::= .word expression [, expression]* 08400 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) { 08401 if (getLexer().isNot(AsmToken::EndOfStatement)) { 08402 for (;;) { 08403 const MCExpr *Value; 08404 if (getParser().parseExpression(Value)) { 08405 Parser.eatToEndOfStatement(); 08406 return false; 08407 } 08408 08409 getParser().getStreamer().EmitValue(Value, Size); 08410 08411 if (getLexer().is(AsmToken::EndOfStatement)) 08412 break; 08413 08414 // FIXME: Improve diagnostic. 08415 if (getLexer().isNot(AsmToken::Comma)) { 08416 Error(L, "unexpected token in directive"); 08417 return false; 08418 } 08419 Parser.Lex(); 08420 } 08421 } 08422 08423 Parser.Lex(); 08424 return false; 08425 } 08426 08427 /// parseDirectiveThumb 08428 /// ::= .thumb 08429 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 08430 if (getLexer().isNot(AsmToken::EndOfStatement)) { 08431 Error(L, "unexpected token in directive"); 08432 return false; 08433 } 08434 Parser.Lex(); 08435 08436 if (!hasThumb()) { 08437 Error(L, "target does not support Thumb mode"); 08438 return false; 08439 } 08440 08441 if (!isThumb()) 08442 SwitchMode(); 08443 08444 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 08445 return false; 08446 } 08447 08448 /// parseDirectiveARM 08449 /// ::= .arm 08450 bool ARMAsmParser::parseDirectiveARM(SMLoc L) { 08451 if (getLexer().isNot(AsmToken::EndOfStatement)) { 08452 Error(L, "unexpected token in directive"); 08453 return false; 08454 } 08455 Parser.Lex(); 08456 08457 if (!hasARM()) { 08458 Error(L, "target does not support ARM mode"); 08459 return false; 08460 } 08461 08462 if (isThumb()) 08463 SwitchMode(); 08464 08465 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 08466 return false; 08467 } 08468 08469 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) { 08470 if (NextSymbolIsThumb) { 08471 getParser().getStreamer().EmitThumbFunc(Symbol); 08472 NextSymbolIsThumb = false; 08473 } 08474 } 08475 08476 /// parseDirectiveThumbFunc 08477 /// ::= .thumbfunc symbol_name 08478 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 08479 const auto Format = getContext().getObjectFileInfo()->getObjectFileType(); 08480 bool IsMachO = Format == MCObjectFileInfo::IsMachO; 08481 08482 // Darwin asm has (optionally) function name after .thumb_func direction 08483 // ELF doesn't 08484 if (IsMachO) { 08485 const AsmToken &Tok = Parser.getTok(); 08486 if (Tok.isNot(AsmToken::EndOfStatement)) { 08487 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) { 08488 Error(L, "unexpected token in .thumb_func directive"); 08489 return false; 08490 } 08491 08492 MCSymbol *Func = 08493 getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier()); 08494 getParser().getStreamer().EmitThumbFunc(Func); 08495 Parser.Lex(); // Consume the identifier token. 
08496 return false;
08497 }
08498 }
08499
08500 if (getLexer().isNot(AsmToken::EndOfStatement)) {
08501 Error(Parser.getTok().getLoc(), "unexpected token in directive");
08502 Parser.eatToEndOfStatement();
08503 return false;
08504 }
08505
08506 NextSymbolIsThumb = true;
08507 return false;
08508 }
08509
08510 /// parseDirectiveSyntax
08511 /// ::= .syntax unified | divided
08512 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
08513 const AsmToken &Tok = Parser.getTok();
08514 if (Tok.isNot(AsmToken::Identifier)) {
08515 Error(L, "unexpected token in .syntax directive");
08516 return false;
08517 }
08518
08519 StringRef Mode = Tok.getString();
08520 if (Mode == "unified" || Mode == "UNIFIED") {
08521 Parser.Lex();
08522 } else if (Mode == "divided" || Mode == "DIVIDED") {
08523 Error(L, "'.syntax divided' arm assembly not supported");
08524 return false;
08525 } else {
08526 Error(L, "unrecognized syntax mode in .syntax directive");
08527 return false;
08528 }
08529
08530 if (getLexer().isNot(AsmToken::EndOfStatement)) {
08531 Error(Parser.getTok().getLoc(), "unexpected token in directive");
08532 return false;
08533 }
08534 Parser.Lex();
08535
08536 // TODO tell the MC streamer the mode
08537 // getParser().getStreamer().Emit???();
08538 return false;
08539 }
08540
08541 /// parseDirectiveCode
08542 /// ::= .code 16 | 32
08543 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
08544 const AsmToken &Tok = Parser.getTok();
08545 if (Tok.isNot(AsmToken::Integer)) {
08546 Error(L, "unexpected token in .code directive");
08547 return false;
08548 }
08549 int64_t Val = Parser.getTok().getIntVal();
08550 if (Val != 16 && Val != 32) {
08551 Error(L, "invalid operand to .code directive");
08552 return false;
08553 }
08554 Parser.Lex();
08555
08556 if (getLexer().isNot(AsmToken::EndOfStatement)) {
08557 Error(Parser.getTok().getLoc(), "unexpected token in directive");
08558 return false;
08559 }
08560 Parser.Lex();
08561
08562 if (Val == 16) {
08563 if (!hasThumb()) {
08564 Error(L, "target does not support Thumb mode");
08565 return false;
08566 }
08567
08568 if (!isThumb())
08569 SwitchMode();
08570 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
08571 } else {
08572 if (!hasARM()) {
08573 Error(L, "target does not support ARM mode");
08574 return false;
08575 }
08576
08577 if (isThumb())
08578 SwitchMode();
08579 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
08580 }
08581
08582 return false;
08583 }
08584
08585 /// parseDirectiveReq
08586 /// ::= name .req registername
08587 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
08588 Parser.Lex(); // Eat the '.req' token.
08589 unsigned Reg;
08590 SMLoc SRegLoc, ERegLoc;
08591 if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
08592 Parser.eatToEndOfStatement();
08593 Error(SRegLoc, "register name expected");
08594 return false;
08595 }
08596
08597 // Shouldn't be anything else.
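// Illustrative example (hypothetical input): after "acc .req r4", later uses
// of "acc" are accepted wherever r4 would be; the checks below reject trailing
// tokens and, further down, any attempt to rebind "acc" to a different
// register.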
08598 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { 08599 Parser.eatToEndOfStatement(); 08600 Error(Parser.getTok().getLoc(), "unexpected input in .req directive."); 08601 return false; 08602 } 08603 08604 Parser.Lex(); // Consume the EndOfStatement 08605 08606 if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) { 08607 Error(SRegLoc, "redefinition of '" + Name + "' does not match original."); 08608 return false; 08609 } 08610 08611 return false; 08612 } 08613 08614 /// parseDirectiveUneq 08615 /// ::= .unreq registername 08616 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { 08617 if (Parser.getTok().isNot(AsmToken::Identifier)) { 08618 Parser.eatToEndOfStatement(); 08619 Error(L, "unexpected input in .unreq directive."); 08620 return false; 08621 } 08622 RegisterReqs.erase(Parser.getTok().getIdentifier().lower()); 08623 Parser.Lex(); // Eat the identifier. 08624 return false; 08625 } 08626 08627 /// parseDirectiveArch 08628 /// ::= .arch token 08629 bool ARMAsmParser::parseDirectiveArch(SMLoc L) { 08630 StringRef Arch = getParser().parseStringToEndOfStatement().trim(); 08631 08632 unsigned ID = StringSwitch<unsigned>(Arch) 08633 #define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \ 08634 .Case(NAME, ARM::ID) 08635 #define ARM_ARCH_ALIAS(NAME, ID) \ 08636 .Case(NAME, ARM::ID) 08637 #include "MCTargetDesc/ARMArchName.def" 08638 .Default(ARM::INVALID_ARCH); 08639 08640 if (ID == ARM::INVALID_ARCH) { 08641 Error(L, "Unknown arch name"); 08642 return false; 08643 } 08644 08645 getTargetStreamer().emitArch(ID); 08646 return false; 08647 } 08648 08649 /// parseDirectiveEabiAttr 08650 /// ::= .eabi_attribute int, int [, "str"] 08651 /// ::= .eabi_attribute Tag_name, int [, "str"] 08652 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) { 08653 int64_t Tag; 08654 SMLoc TagLoc; 08655 TagLoc = Parser.getTok().getLoc(); 08656 if (Parser.getTok().is(AsmToken::Identifier)) { 08657 StringRef Name = Parser.getTok().getIdentifier(); 08658 Tag = ARMBuildAttrs::AttrTypeFromString(Name); 08659 if (Tag == -1) { 08660 Error(TagLoc, "attribute name not recognised: " + Name); 08661 Parser.eatToEndOfStatement(); 08662 return false; 08663 } 08664 Parser.Lex(); 08665 } else { 08666 const MCExpr *AttrExpr; 08667 08668 TagLoc = Parser.getTok().getLoc(); 08669 if (Parser.parseExpression(AttrExpr)) { 08670 Parser.eatToEndOfStatement(); 08671 return false; 08672 } 08673 08674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr); 08675 if (!CE) { 08676 Error(TagLoc, "expected numeric constant"); 08677 Parser.eatToEndOfStatement(); 08678 return false; 08679 } 08680 08681 Tag = CE->getValue(); 08682 } 08683 08684 if (Parser.getTok().isNot(AsmToken::Comma)) { 08685 Error(Parser.getTok().getLoc(), "comma expected"); 08686 Parser.eatToEndOfStatement(); 08687 return false; 08688 } 08689 Parser.Lex(); // skip comma 08690 08691 StringRef StringValue = ""; 08692 bool IsStringValue = false; 08693 08694 int64_t IntegerValue = 0; 08695 bool IsIntegerValue = false; 08696 08697 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name) 08698 IsStringValue = true; 08699 else if (Tag == ARMBuildAttrs::compatibility) { 08700 IsStringValue = true; 08701 IsIntegerValue = true; 08702 } else if (Tag < 32 || Tag % 2 == 0) 08703 IsIntegerValue = true; 08704 else if (Tag % 2 == 1) 08705 IsStringValue = true; 08706 else 08707 llvm_unreachable("invalid tag type"); 08708 08709 if (IsIntegerValue) { 08710 const MCExpr *ValueExpr; 08711 SMLoc ValueExprLoc = Parser.getTok().getLoc(); 
08712 if (Parser.parseExpression(ValueExpr)) {
08713 Parser.eatToEndOfStatement();
08714 return false;
08715 }
08716
08717 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
08718 if (!CE) {
08719 Error(ValueExprLoc, "expected numeric constant");
08720 Parser.eatToEndOfStatement();
08721 return false;
08722 }
08723
08724 IntegerValue = CE->getValue();
08725 }
08726
08727 if (Tag == ARMBuildAttrs::compatibility) {
08728 if (Parser.getTok().isNot(AsmToken::Comma))
08729 IsStringValue = false;
08730 else
08731 Parser.Lex();
08732 }
08733
08734 if (IsStringValue) {
08735 if (Parser.getTok().isNot(AsmToken::String)) {
08736 Error(Parser.getTok().getLoc(), "bad string constant");
08737 Parser.eatToEndOfStatement();
08738 return false;
08739 }
08740
08741 StringValue = Parser.getTok().getStringContents();
08742 Parser.Lex();
08743 }
08744
08745 if (IsIntegerValue && IsStringValue) {
08746 assert(Tag == ARMBuildAttrs::compatibility);
08747 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
08748 } else if (IsIntegerValue)
08749 getTargetStreamer().emitAttribute(Tag, IntegerValue);
08750 else if (IsStringValue)
08751 getTargetStreamer().emitTextAttribute(Tag, StringValue);
08752 return false;
08753 }
08754
08755 /// parseDirectiveCPU
08756 /// ::= .cpu str
08757 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
08758 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
08759 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
08760 return false;
08761 }
08762
08763 // FIXME: This is duplicated in getARMFPUFeatures() in
08764 // tools/clang/lib/Driver/Tools.cpp
08765 static const struct {
08766 const unsigned Fpu;
08767 const uint64_t Enabled;
08768 const uint64_t Disabled;
08769 } Fpus[] = {
08770 {ARM::VFP, ARM::FeatureVFP2, ARM::FeatureNEON},
08771 {ARM::VFPV2, ARM::FeatureVFP2, ARM::FeatureNEON},
08772 {ARM::VFPV3, ARM::FeatureVFP3, ARM::FeatureNEON},
08773 {ARM::VFPV3_D16, ARM::FeatureVFP3 | ARM::FeatureD16, ARM::FeatureNEON},
08774 {ARM::VFPV4, ARM::FeatureVFP4, ARM::FeatureNEON},
08775 {ARM::VFPV4_D16, ARM::FeatureVFP4 | ARM::FeatureD16, ARM::FeatureNEON},
08776 {ARM::FP_ARMV8, ARM::FeatureFPARMv8,
08777 ARM::FeatureNEON | ARM::FeatureCrypto},
08778 {ARM::NEON, ARM::FeatureNEON, 0},
08779 {ARM::NEON_VFPV4, ARM::FeatureVFP4 | ARM::FeatureNEON, 0},
08780 {ARM::NEON_FP_ARMV8, ARM::FeatureFPARMv8 | ARM::FeatureNEON,
08781 ARM::FeatureCrypto},
08782 {ARM::CRYPTO_NEON_FP_ARMV8,
08783 ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto, 0},
08784 {ARM::SOFTVFP, 0, 0},
08785 };
08786
08787 /// parseDirectiveFPU
08788 /// ::= .fpu str
08789 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
08790 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
08791
08792 unsigned ID = StringSwitch<unsigned>(FPU)
08793 #define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
08794 #include "ARMFPUName.def"
08795 .Default(ARM::INVALID_FPU);
08796
08797 if (ID == ARM::INVALID_FPU) {
08798 Error(L, "Unknown FPU name");
08799 return false;
08800 }
08801
08802 for (const auto &Fpu : Fpus) {
08803 if (Fpu.Fpu != ID)
08804 continue;
08805
08806 // Need to toggle features that should be on but are off and that
08807 // should be off but are on.
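// Illustrative example (hypothetical input): switching from ".fpu neon" to
// ".fpu vfpv3-d16" should leave FeatureNEON set but unwanted and FeatureD16
// wanted but clear, so both feature bits end up in the Toggle mask computed
// below.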
08808 uint64_t Toggle = (Fpu.Enabled & ~STI.getFeatureBits()) | 08809 (Fpu.Disabled & STI.getFeatureBits()); 08810 setAvailableFeatures(ComputeAvailableFeatures(STI.ToggleFeature(Toggle))); 08811 break; 08812 } 08813 08814 getTargetStreamer().emitFPU(ID); 08815 return false; 08816 } 08817 08818 /// parseDirectiveFnStart 08819 /// ::= .fnstart 08820 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) { 08821 if (UC.hasFnStart()) { 08822 Error(L, ".fnstart starts before the end of previous one"); 08823 UC.emitFnStartLocNotes(); 08824 return false; 08825 } 08826 08827 // Reset the unwind directives parser state 08828 UC.reset(); 08829 08830 getTargetStreamer().emitFnStart(); 08831 08832 UC.recordFnStart(L); 08833 return false; 08834 } 08835 08836 /// parseDirectiveFnEnd 08837 /// ::= .fnend 08838 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) { 08839 // Check the ordering of unwind directives 08840 if (!UC.hasFnStart()) { 08841 Error(L, ".fnstart must precede .fnend directive"); 08842 return false; 08843 } 08844 08845 // Reset the unwind directives parser state 08846 getTargetStreamer().emitFnEnd(); 08847 08848 UC.reset(); 08849 return false; 08850 } 08851 08852 /// parseDirectiveCantUnwind 08853 /// ::= .cantunwind 08854 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) { 08855 UC.recordCantUnwind(L); 08856 08857 // Check the ordering of unwind directives 08858 if (!UC.hasFnStart()) { 08859 Error(L, ".fnstart must precede .cantunwind directive"); 08860 return false; 08861 } 08862 if (UC.hasHandlerData()) { 08863 Error(L, ".cantunwind can't be used with .handlerdata directive"); 08864 UC.emitHandlerDataLocNotes(); 08865 return false; 08866 } 08867 if (UC.hasPersonality()) { 08868 Error(L, ".cantunwind can't be used with .personality directive"); 08869 UC.emitPersonalityLocNotes(); 08870 return false; 08871 } 08872 08873 getTargetStreamer().emitCantUnwind(); 08874 return false; 08875 } 08876 08877 /// parseDirectivePersonality 08878 /// ::= .personality name 08879 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) { 08880 bool HasExistingPersonality = UC.hasPersonality(); 08881 08882 UC.recordPersonality(L); 08883 08884 // Check the ordering of unwind directives 08885 if (!UC.hasFnStart()) { 08886 Error(L, ".fnstart must precede .personality directive"); 08887 return false; 08888 } 08889 if (UC.cantUnwind()) { 08890 Error(L, ".personality can't be used with .cantunwind directive"); 08891 UC.emitCantUnwindLocNotes(); 08892 return false; 08893 } 08894 if (UC.hasHandlerData()) { 08895 Error(L, ".personality must precede .handlerdata directive"); 08896 UC.emitHandlerDataLocNotes(); 08897 return false; 08898 } 08899 if (HasExistingPersonality) { 08900 Parser.eatToEndOfStatement(); 08901 Error(L, "multiple personality directives"); 08902 UC.emitPersonalityLocNotes(); 08903 return false; 08904 } 08905 08906 // Parse the name of the personality routine 08907 if (Parser.getTok().isNot(AsmToken::Identifier)) { 08908 Parser.eatToEndOfStatement(); 08909 Error(L, "unexpected input in .personality directive."); 08910 return false; 08911 } 08912 StringRef Name(Parser.getTok().getIdentifier()); 08913 Parser.Lex(); 08914 08915 MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name); 08916 getTargetStreamer().emitPersonality(PR); 08917 return false; 08918 } 08919 08920 /// parseDirectiveHandlerData 08921 /// ::= .handlerdata 08922 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) { 08923 UC.recordHandlerData(L); 08924 08925 // Check the ordering of unwind directives 08926 if (!UC.hasFnStart()) 
{
08927     Error(L, ".fnstart must precede .handlerdata directive");
08928     return false;
08929   }
08930   if (UC.cantUnwind()) {
08931     Error(L, ".handlerdata can't be used with .cantunwind directive");
08932     UC.emitCantUnwindLocNotes();
08933     return false;
08934   }
08935 
08936   getTargetStreamer().emitHandlerData();
08937   return false;
08938 }
08939 
08940 /// parseDirectiveSetFP
08941 /// ::= .setfp fpreg, spreg [, offset]
08942 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
08943   // Check the ordering of unwind directives
08944   if (!UC.hasFnStart()) {
08945     Error(L, ".fnstart must precede .setfp directive");
08946     return false;
08947   }
08948   if (UC.hasHandlerData()) {
08949     Error(L, ".setfp must precede .handlerdata directive");
08950     return false;
08951   }
08952 
08953   // Parse fpreg
08954   SMLoc FPRegLoc = Parser.getTok().getLoc();
08955   int FPReg = tryParseRegister();
08956   if (FPReg == -1) {
08957     Error(FPRegLoc, "frame pointer register expected");
08958     return false;
08959   }
08960 
08961   // Consume comma
08962   if (Parser.getTok().isNot(AsmToken::Comma)) {
08963     Error(Parser.getTok().getLoc(), "comma expected");
08964     return false;
08965   }
08966   Parser.Lex(); // skip comma
08967 
08968   // Parse spreg
08969   SMLoc SPRegLoc = Parser.getTok().getLoc();
08970   int SPReg = tryParseRegister();
08971   if (SPReg == -1) {
08972     Error(SPRegLoc, "stack pointer register expected");
08973     return false;
08974   }
08975 
08976   if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
08977     Error(SPRegLoc, "register should be either $sp or the latest fp register");
08978     return false;
08979   }
08980 
08981   // Update the frame pointer register
08982   UC.saveFPReg(FPReg);
08983 
08984   // Parse offset
08985   int64_t Offset = 0;
08986   if (Parser.getTok().is(AsmToken::Comma)) {
08987     Parser.Lex(); // skip comma
08988 
08989     if (Parser.getTok().isNot(AsmToken::Hash) &&
08990         Parser.getTok().isNot(AsmToken::Dollar)) {
08991       Error(Parser.getTok().getLoc(), "'#' expected");
08992       return false;
08993     }
08994     Parser.Lex(); // skip hash token.
08995 
08996     const MCExpr *OffsetExpr;
08997     SMLoc ExLoc = Parser.getTok().getLoc();
08998     SMLoc EndLoc;
08999     if (getParser().parseExpression(OffsetExpr, EndLoc)) {
09000       Error(ExLoc, "malformed setfp offset");
09001       return false;
09002     }
09003     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
09004     if (!CE) {
09005       Error(ExLoc, "setfp offset must be an immediate");
09006       return false;
09007     }
09008 
09009     Offset = CE->getValue();
09010   }
09011 
09012   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
09013                                 static_cast<unsigned>(SPReg), Offset);
09014   return false;
09015 }
09016 
09017 /// parseDirectivePad
09018 /// ::= .pad offset
09019 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
09020   // Check the ordering of unwind directives
09021   if (!UC.hasFnStart()) {
09022     Error(L, ".fnstart must precede .pad directive");
09023     return false;
09024   }
09025   if (UC.hasHandlerData()) {
09026     Error(L, ".pad must precede .handlerdata directive");
09027     return false;
09028   }
09029 
09030   // Parse the offset
09031   if (Parser.getTok().isNot(AsmToken::Hash) &&
09032       Parser.getTok().isNot(AsmToken::Dollar)) {
09033     Error(Parser.getTok().getLoc(), "'#' expected");
09034     return false;
09035   }
09036   Parser.Lex(); // skip hash token.
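  // Illustrative usage (hypothetical, not taken from this file):
  //   .pad #16
  // The offset expression parsed below must fold to an MCConstantExpr;
  // anything else is reported as "pad offset must be an immediate".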
09037 09038 const MCExpr *OffsetExpr; 09039 SMLoc ExLoc = Parser.getTok().getLoc(); 09040 SMLoc EndLoc; 09041 if (getParser().parseExpression(OffsetExpr, EndLoc)) { 09042 Error(ExLoc, "malformed pad offset"); 09043 return false; 09044 } 09045 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 09046 if (!CE) { 09047 Error(ExLoc, "pad offset must be an immediate"); 09048 return false; 09049 } 09050 09051 getTargetStreamer().emitPad(CE->getValue()); 09052 return false; 09053 } 09054 09055 /// parseDirectiveRegSave 09056 /// ::= .save { registers } 09057 /// ::= .vsave { registers } 09058 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) { 09059 // Check the ordering of unwind directives 09060 if (!UC.hasFnStart()) { 09061 Error(L, ".fnstart must precede .save or .vsave directives"); 09062 return false; 09063 } 09064 if (UC.hasHandlerData()) { 09065 Error(L, ".save or .vsave must precede .handlerdata directive"); 09066 return false; 09067 } 09068 09069 // RAII object to make sure parsed operands are deleted. 09070 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands; 09071 09072 // Parse the register list 09073 if (parseRegisterList(Operands)) 09074 return false; 09075 ARMOperand &Op = (ARMOperand &)*Operands[0]; 09076 if (!IsVector && !Op.isRegList()) { 09077 Error(L, ".save expects GPR registers"); 09078 return false; 09079 } 09080 if (IsVector && !Op.isDPRRegList()) { 09081 Error(L, ".vsave expects DPR registers"); 09082 return false; 09083 } 09084 09085 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector); 09086 return false; 09087 } 09088 09089 /// parseDirectiveInst 09090 /// ::= .inst opcode [, ...] 09091 /// ::= .inst.n opcode [, ...] 09092 /// ::= .inst.w opcode [, ...] 09093 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) { 09094 int Width; 09095 09096 if (isThumb()) { 09097 switch (Suffix) { 09098 case 'n': 09099 Width = 2; 09100 break; 09101 case 'w': 09102 Width = 4; 09103 break; 09104 default: 09105 Parser.eatToEndOfStatement(); 09106 Error(Loc, "cannot determine Thumb instruction size, " 09107 "use inst.n/inst.w instead"); 09108 return false; 09109 } 09110 } else { 09111 if (Suffix) { 09112 Parser.eatToEndOfStatement(); 09113 Error(Loc, "width suffixes are invalid in ARM mode"); 09114 return false; 09115 } 09116 Width = 4; 09117 } 09118 09119 if (getLexer().is(AsmToken::EndOfStatement)) { 09120 Parser.eatToEndOfStatement(); 09121 Error(Loc, "expected expression following directive"); 09122 return false; 09123 } 09124 09125 for (;;) { 09126 const MCExpr *Expr; 09127 09128 if (getParser().parseExpression(Expr)) { 09129 Error(Loc, "expected expression"); 09130 return false; 09131 } 09132 09133 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); 09134 if (!Value) { 09135 Error(Loc, "expected constant expression"); 09136 return false; 09137 } 09138 09139 switch (Width) { 09140 case 2: 09141 if (Value->getValue() > 0xffff) { 09142 Error(Loc, "inst.n operand is too big, use inst.w instead"); 09143 return false; 09144 } 09145 break; 09146 case 4: 09147 if (Value->getValue() > 0xffffffff) { 09148 Error(Loc, 09149 StringRef(Suffix ? 
"inst.w" : "inst") + " operand is too big"); 09150 return false; 09151 } 09152 break; 09153 default: 09154 llvm_unreachable("only supported widths are 2 and 4"); 09155 } 09156 09157 getTargetStreamer().emitInst(Value->getValue(), Suffix); 09158 09159 if (getLexer().is(AsmToken::EndOfStatement)) 09160 break; 09161 09162 if (getLexer().isNot(AsmToken::Comma)) { 09163 Error(Loc, "unexpected token in directive"); 09164 return false; 09165 } 09166 09167 Parser.Lex(); 09168 } 09169 09170 Parser.Lex(); 09171 return false; 09172 } 09173 09174 /// parseDirectiveLtorg 09175 /// ::= .ltorg | .pool 09176 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) { 09177 getTargetStreamer().emitCurrentConstantPool(); 09178 return false; 09179 } 09180 09181 bool ARMAsmParser::parseDirectiveEven(SMLoc L) { 09182 const MCSection *Section = getStreamer().getCurrentSection().first; 09183 09184 if (getLexer().isNot(AsmToken::EndOfStatement)) { 09185 TokError("unexpected token in directive"); 09186 return false; 09187 } 09188 09189 if (!Section) { 09190 getStreamer().InitSections(); 09191 Section = getStreamer().getCurrentSection().first; 09192 } 09193 09194 assert(Section && "must have section to emit alignment"); 09195 if (Section->UseCodeAlign()) 09196 getStreamer().EmitCodeAlignment(2); 09197 else 09198 getStreamer().EmitValueToAlignment(2); 09199 09200 return false; 09201 } 09202 09203 /// parseDirectivePersonalityIndex 09204 /// ::= .personalityindex index 09205 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) { 09206 bool HasExistingPersonality = UC.hasPersonality(); 09207 09208 UC.recordPersonalityIndex(L); 09209 09210 if (!UC.hasFnStart()) { 09211 Parser.eatToEndOfStatement(); 09212 Error(L, ".fnstart must precede .personalityindex directive"); 09213 return false; 09214 } 09215 if (UC.cantUnwind()) { 09216 Parser.eatToEndOfStatement(); 09217 Error(L, ".personalityindex cannot be used with .cantunwind"); 09218 UC.emitCantUnwindLocNotes(); 09219 return false; 09220 } 09221 if (UC.hasHandlerData()) { 09222 Parser.eatToEndOfStatement(); 09223 Error(L, ".personalityindex must precede .handlerdata directive"); 09224 UC.emitHandlerDataLocNotes(); 09225 return false; 09226 } 09227 if (HasExistingPersonality) { 09228 Parser.eatToEndOfStatement(); 09229 Error(L, "multiple personality directives"); 09230 UC.emitPersonalityLocNotes(); 09231 return false; 09232 } 09233 09234 const MCExpr *IndexExpression; 09235 SMLoc IndexLoc = Parser.getTok().getLoc(); 09236 if (Parser.parseExpression(IndexExpression)) { 09237 Parser.eatToEndOfStatement(); 09238 return false; 09239 } 09240 09241 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression); 09242 if (!CE) { 09243 Parser.eatToEndOfStatement(); 09244 Error(IndexLoc, "index must be a constant number"); 09245 return false; 09246 } 09247 if (CE->getValue() < 0 || 09248 CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) { 09249 Parser.eatToEndOfStatement(); 09250 Error(IndexLoc, "personality routine index should be in range [0-3]"); 09251 return false; 09252 } 09253 09254 getTargetStreamer().emitPersonalityIndex(CE->getValue()); 09255 return false; 09256 } 09257 09258 /// parseDirectiveUnwindRaw 09259 /// ::= .unwind_raw offset, opcode [, opcode...] 
09260 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) { 09261 if (!UC.hasFnStart()) { 09262 Parser.eatToEndOfStatement(); 09263 Error(L, ".fnstart must precede .unwind_raw directives"); 09264 return false; 09265 } 09266 09267 int64_t StackOffset; 09268 09269 const MCExpr *OffsetExpr; 09270 SMLoc OffsetLoc = getLexer().getLoc(); 09271 if (getLexer().is(AsmToken::EndOfStatement) || 09272 getParser().parseExpression(OffsetExpr)) { 09273 Error(OffsetLoc, "expected expression"); 09274 Parser.eatToEndOfStatement(); 09275 return false; 09276 } 09277 09278 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 09279 if (!CE) { 09280 Error(OffsetLoc, "offset must be a constant"); 09281 Parser.eatToEndOfStatement(); 09282 return false; 09283 } 09284 09285 StackOffset = CE->getValue(); 09286 09287 if (getLexer().isNot(AsmToken::Comma)) { 09288 Error(getLexer().getLoc(), "expected comma"); 09289 Parser.eatToEndOfStatement(); 09290 return false; 09291 } 09292 Parser.Lex(); 09293 09294 SmallVector<uint8_t, 16> Opcodes; 09295 for (;;) { 09296 const MCExpr *OE; 09297 09298 SMLoc OpcodeLoc = getLexer().getLoc(); 09299 if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) { 09300 Error(OpcodeLoc, "expected opcode expression"); 09301 Parser.eatToEndOfStatement(); 09302 return false; 09303 } 09304 09305 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE); 09306 if (!OC) { 09307 Error(OpcodeLoc, "opcode value must be a constant"); 09308 Parser.eatToEndOfStatement(); 09309 return false; 09310 } 09311 09312 const int64_t Opcode = OC->getValue(); 09313 if (Opcode & ~0xff) { 09314 Error(OpcodeLoc, "invalid opcode"); 09315 Parser.eatToEndOfStatement(); 09316 return false; 09317 } 09318 09319 Opcodes.push_back(uint8_t(Opcode)); 09320 09321 if (getLexer().is(AsmToken::EndOfStatement)) 09322 break; 09323 09324 if (getLexer().isNot(AsmToken::Comma)) { 09325 Error(getLexer().getLoc(), "unexpected token in directive"); 09326 Parser.eatToEndOfStatement(); 09327 return false; 09328 } 09329 09330 Parser.Lex(); 09331 } 09332 09333 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes); 09334 09335 Parser.Lex(); 09336 return false; 09337 } 09338 09339 /// parseDirectiveTLSDescSeq 09340 /// ::= .tlsdescseq tls-variable 09341 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) { 09342 if (getLexer().isNot(AsmToken::Identifier)) { 09343 TokError("expected variable after '.tlsdescseq' directive"); 09344 Parser.eatToEndOfStatement(); 09345 return false; 09346 } 09347 09348 const MCSymbolRefExpr *SRE = 09349 MCSymbolRefExpr::Create(Parser.getTok().getIdentifier(), 09350 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext()); 09351 Lex(); 09352 09353 if (getLexer().isNot(AsmToken::EndOfStatement)) { 09354 Error(Parser.getTok().getLoc(), "unexpected token"); 09355 Parser.eatToEndOfStatement(); 09356 return false; 09357 } 09358 09359 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE); 09360 return false; 09361 } 09362 09363 /// parseDirectiveMovSP 09364 /// ::= .movsp reg [, #offset] 09365 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) { 09366 if (!UC.hasFnStart()) { 09367 Parser.eatToEndOfStatement(); 09368 Error(L, ".fnstart must precede .movsp directives"); 09369 return false; 09370 } 09371 if (UC.getFPReg() != ARM::SP) { 09372 Parser.eatToEndOfStatement(); 09373 Error(L, "unexpected .movsp directive"); 09374 return false; 09375 } 09376 09377 SMLoc SPRegLoc = Parser.getTok().getLoc(); 09378 int SPReg = tryParseRegister(); 09379 if (SPReg == -1) { 09380 Parser.eatToEndOfStatement(); 09381 
Error(SPRegLoc, "register expected"); 09382 return false; 09383 } 09384 09385 if (SPReg == ARM::SP || SPReg == ARM::PC) { 09386 Parser.eatToEndOfStatement(); 09387 Error(SPRegLoc, "sp and pc are not permitted in .movsp directive"); 09388 return false; 09389 } 09390 09391 int64_t Offset = 0; 09392 if (Parser.getTok().is(AsmToken::Comma)) { 09393 Parser.Lex(); 09394 09395 if (Parser.getTok().isNot(AsmToken::Hash)) { 09396 Error(Parser.getTok().getLoc(), "expected #constant"); 09397 Parser.eatToEndOfStatement(); 09398 return false; 09399 } 09400 Parser.Lex(); 09401 09402 const MCExpr *OffsetExpr; 09403 SMLoc OffsetLoc = Parser.getTok().getLoc(); 09404 if (Parser.parseExpression(OffsetExpr)) { 09405 Parser.eatToEndOfStatement(); 09406 Error(OffsetLoc, "malformed offset expression"); 09407 return false; 09408 } 09409 09410 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 09411 if (!CE) { 09412 Parser.eatToEndOfStatement(); 09413 Error(OffsetLoc, "offset must be an immediate constant"); 09414 return false; 09415 } 09416 09417 Offset = CE->getValue(); 09418 } 09419 09420 getTargetStreamer().emitMovSP(SPReg, Offset); 09421 UC.saveFPReg(SPReg); 09422 09423 return false; 09424 } 09425 09426 /// parseDirectiveObjectArch 09427 /// ::= .object_arch name 09428 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) { 09429 if (getLexer().isNot(AsmToken::Identifier)) { 09430 Error(getLexer().getLoc(), "unexpected token"); 09431 Parser.eatToEndOfStatement(); 09432 return false; 09433 } 09434 09435 StringRef Arch = Parser.getTok().getString(); 09436 SMLoc ArchLoc = Parser.getTok().getLoc(); 09437 getLexer().Lex(); 09438 09439 unsigned ID = StringSwitch<unsigned>(Arch) 09440 #define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \ 09441 .Case(NAME, ARM::ID) 09442 #define ARM_ARCH_ALIAS(NAME, ID) \ 09443 .Case(NAME, ARM::ID) 09444 #include "MCTargetDesc/ARMArchName.def" 09445 #undef ARM_ARCH_NAME 09446 #undef ARM_ARCH_ALIAS 09447 .Default(ARM::INVALID_ARCH); 09448 09449 if (ID == ARM::INVALID_ARCH) { 09450 Error(ArchLoc, "unknown architecture '" + Arch + "'"); 09451 Parser.eatToEndOfStatement(); 09452 return false; 09453 } 09454 09455 getTargetStreamer().emitObjectArch(ID); 09456 09457 if (getLexer().isNot(AsmToken::EndOfStatement)) { 09458 Error(getLexer().getLoc(), "unexpected token"); 09459 Parser.eatToEndOfStatement(); 09460 } 09461 09462 return false; 09463 } 09464 09465 /// parseDirectiveAlign 09466 /// ::= .align 09467 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) { 09468 // NOTE: if this is not the end of the statement, fall back to the target 09469 // agnostic handling for this directive which will correctly handle this. 09470 if (getLexer().isNot(AsmToken::EndOfStatement)) 09471 return true; 09472 09473 // '.align' is target specifically handled to mean 2**2 byte alignment. 
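  // Descriptive note (not in the original source): in code sections the
  // 4-byte boundary is reached with code padding via EmitCodeAlignment,
  // while in data sections zero fill is emitted via EmitValueToAlignment;
  // this mirrors the 2-byte handling in parseDirectiveEven above.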
09474 if (getStreamer().getCurrentSection().first->UseCodeAlign()) 09475 getStreamer().EmitCodeAlignment(4, 0); 09476 else 09477 getStreamer().EmitValueToAlignment(4, 0, 1, 0); 09478 09479 return false; 09480 } 09481 09482 /// parseDirectiveThumbSet 09483 /// ::= .thumb_set name, value 09484 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) { 09485 StringRef Name; 09486 if (Parser.parseIdentifier(Name)) { 09487 TokError("expected identifier after '.thumb_set'"); 09488 Parser.eatToEndOfStatement(); 09489 return false; 09490 } 09491 09492 if (getLexer().isNot(AsmToken::Comma)) { 09493 TokError("expected comma after name '" + Name + "'"); 09494 Parser.eatToEndOfStatement(); 09495 return false; 09496 } 09497 Lex(); 09498 09499 const MCExpr *Value; 09500 if (Parser.parseExpression(Value)) { 09501 TokError("missing expression"); 09502 Parser.eatToEndOfStatement(); 09503 return false; 09504 } 09505 09506 if (getLexer().isNot(AsmToken::EndOfStatement)) { 09507 TokError("unexpected token"); 09508 Parser.eatToEndOfStatement(); 09509 return false; 09510 } 09511 Lex(); 09512 09513 MCSymbol *Alias = getContext().GetOrCreateSymbol(Name); 09514 getTargetStreamer().emitThumbSet(Alias, Value); 09515 return false; 09516 } 09517 09518 /// Force static initialization. 09519 extern "C" void LLVMInitializeARMAsmParser() { 09520 RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget); 09521 RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget); 09522 RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget); 09523 RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget); 09524 } 09525 09526 #define GET_REGISTER_MATCHER 09527 #define GET_SUBTARGET_FEATURE_NAME 09528 #define GET_MATCHER_IMPLEMENTATION 09529 #include "ARMGenAsmMatcher.inc" 09530 09531 static const struct { 09532 const char *Name; 09533 const unsigned ArchCheck; 09534 const uint64_t Features; 09535 } Extensions[] = { 09536 { "crc", Feature_HasV8, ARM::FeatureCRC }, 09537 { "crypto", Feature_HasV8, 09538 ARM::FeatureCrypto | ARM::FeatureNEON | ARM::FeatureFPARMv8 }, 09539 { "fp", Feature_HasV8, ARM::FeatureFPARMv8 }, 09540 { "idiv", Feature_HasV7 | Feature_IsNotMClass, 09541 ARM::FeatureHWDiv | ARM::FeatureHWDivARM }, 09542 // FIXME: iWMMXT not supported 09543 { "iwmmxt", Feature_None, 0 }, 09544 // FIXME: iWMMXT2 not supported 09545 { "iwmmxt2", Feature_None, 0 }, 09546 // FIXME: Maverick not supported 09547 { "maverick", Feature_None, 0 }, 09548 { "mp", Feature_HasV7 | Feature_IsNotMClass, ARM::FeatureMP }, 09549 // FIXME: ARMv6-m OS Extensions feature not checked 09550 { "os", Feature_None, 0 }, 09551 // FIXME: Also available in ARMv6-K 09552 { "sec", Feature_HasV7, ARM::FeatureTrustZone }, 09553 { "simd", Feature_HasV8, ARM::FeatureNEON | ARM::FeatureFPARMv8 }, 09554 // FIXME: Only available in A-class, isel not predicated 09555 { "virt", Feature_HasV7, ARM::FeatureVirtualization }, 09556 // FIXME: xscale not supported 09557 { "xscale", Feature_None, 0 }, 09558 }; 09559 09560 /// parseDirectiveArchExtension 09561 /// ::= .arch_extension [no]feature 09562 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) { 09563 if (getLexer().isNot(AsmToken::Identifier)) { 09564 Error(getLexer().getLoc(), "unexpected token"); 09565 Parser.eatToEndOfStatement(); 09566 return false; 09567 } 09568 09569 StringRef Name = Parser.getTok().getString(); 09570 SMLoc ExtLoc = Parser.getTok().getLoc(); 09571 getLexer().Lex(); 09572 09573 bool EnableFeature = true; 09574 if (Name.startswith_lower("no")) { 09575 EnableFeature = false; 09576 Name = Name.substr(2); 09577 } 09578 
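  // Illustrative usage (hypothetical, not taken from this file):
  //   .arch_extension crc      @ enable the CRC extension
  //   .arch_extension nocrc    @ turn it back off
  // The loop below looks the (possibly "no"-prefixed) name up in the
  // Extensions table and toggles the corresponding subtarget features.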
09579 for (const auto &Extension : Extensions) { 09580 if (Extension.Name != Name) 09581 continue; 09582 09583 if (!Extension.Features) 09584 report_fatal_error("unsupported architectural extension: " + Name); 09585 09586 if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) { 09587 Error(ExtLoc, "architectural extension '" + Name + "' is not " 09588 "allowed for the current base architecture"); 09589 return false; 09590 } 09591 09592 uint64_t ToggleFeatures = EnableFeature 09593 ? (~STI.getFeatureBits() & Extension.Features) 09594 : ( STI.getFeatureBits() & Extension.Features); 09595 uint64_t Features = 09596 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); 09597 setAvailableFeatures(Features); 09598 return false; 09599 } 09600 09601 Error(ExtLoc, "unknown architectural extension: " + Name); 09602 Parser.eatToEndOfStatement(); 09603 return false; 09604 } 09605 09606 // Define this matcher function after the auto-generated include so we 09607 // have the match class enum definitions. 09608 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, 09609 unsigned Kind) { 09610 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp); 09611 // If the kind is a token for a literal immediate, check if our asm 09612 // operand matches. This is for InstAliases which have a fixed-value 09613 // immediate in the syntax. 09614 switch (Kind) { 09615 default: break; 09616 case MCK__35_0: 09617 if (Op.isImm()) 09618 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm())) 09619 if (CE->getValue() == 0) 09620 return Match_Success; 09621 break; 09622 case MCK_ARMSOImm: 09623 if (Op.isImm()) { 09624 const MCExpr *SOExpr = Op.getImm(); 09625 int64_t Value; 09626 if (!SOExpr->EvaluateAsAbsolute(Value)) 09627 return Match_Success; 09628 assert((Value >= INT32_MIN && Value <= UINT32_MAX) && 09629 "expression value must be representable in 32 bits"); 09630 } 09631 break; 09632 case MCK_GPRPair: 09633 if (Op.isReg() && 09634 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg())) 09635 return Match_Success; 09636 break; 09637 } 09638 return Match_InvalidOperand; 09639 }
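// Illustrative way to exercise this parser from the command line (a
// hypothetical invocation, not part of this file):
//   llvm-mc -triple=armv7-linux-gnueabi -filetype=obj input.s -o input.o
// The RegisterMCAsmParser instances in LLVMInitializeARMAsmParser above make
// ARMAsmParser the handler that the TargetRegistry selects for the ARM and
// Thumb targets.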