X86MCCodeEmitter.cpp
00001 //===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file implements the X86MCCodeEmitter class.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "MCTargetDesc/X86MCTargetDesc.h"
00015 #include "MCTargetDesc/X86BaseInfo.h"
00016 #include "MCTargetDesc/X86FixupKinds.h"
00017 #include "llvm/MC/MCCodeEmitter.h"
00018 #include "llvm/MC/MCContext.h"
00019 #include "llvm/MC/MCExpr.h"
00020 #include "llvm/MC/MCInst.h"
00021 #include "llvm/MC/MCInstrInfo.h"
00022 #include "llvm/MC/MCRegisterInfo.h"
00023 #include "llvm/MC/MCSubtargetInfo.h"
00024 #include "llvm/MC/MCSymbol.h"
00025 #include "llvm/Support/raw_ostream.h"
00026 
00027 using namespace llvm;
00028 
00029 #define DEBUG_TYPE "mccodeemitter"
00030 
00031 namespace {
00032 class X86MCCodeEmitter : public MCCodeEmitter {
00033   X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
00034   void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
00035   const MCInstrInfo &MCII;
00036   MCContext &Ctx;
00037 public:
00038   X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
00039     : MCII(mcii), Ctx(ctx) {
00040   }
00041 
00042   ~X86MCCodeEmitter() {}
00043 
00044   bool is64BitMode(const MCSubtargetInfo &STI) const {
00045     return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
00046   }
00047 
00048   bool is32BitMode(const MCSubtargetInfo &STI) const {
00049     return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
00050   }
00051 
00052   bool is16BitMode(const MCSubtargetInfo &STI) const {
00053     return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
00054   }
00055 
00056   /// Is16BitMemOperand - Return true if the specified instruction has
00057   /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
00058   bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
00059                          const MCSubtargetInfo &STI) const {
00060     const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
00061     const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
00062     const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);
00063 
00064     if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
00065         Disp.isImm() && Disp.getImm() < 0x10000)
00066       return true;
00067     if ((BaseReg.getReg() != 0 &&
00068          X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
00069         (IndexReg.getReg() != 0 &&
00070          X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
00071       return true;
00072     return false;
00073   }
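  // For instance, an operand like 4(%bx,%si) uses a GR16 base register and is
  // classified as 16-bit here, while (%eax) is not; in 16-bit mode a bare
  // absolute address below 0x10000 also counts as a 16-bit memory operand.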
00074 
00075   unsigned GetX86RegNum(const MCOperand &MO) const {
00076     return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
00077   }
00078 
00079   // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
00080   // 0-7 and the difference between the 2 groups is given by the REX prefix.
00081   // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
00082   // in 1's complement form, example:
00083   //
00084   //  ModRM field => XMM9 => 1
00085   //  VEX.VVVV    => XMM9 => ~9
00086   //
00087   // See table 4-35 of Intel AVX Programming Reference for details.
00088   unsigned char getVEXRegisterEncoding(const MCInst &MI,
00089                                        unsigned OpNum) const {
00090     unsigned SrcReg = MI.getOperand(OpNum).getReg();
00091     unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
00092     if (X86II::isX86_64ExtendedReg(SrcReg))
00093       SrcRegNum |= 8;
00094 
00095     // The registers represented through VEX_VVVV should
00096     // be encoded in 1's complement form.
00097     return (~SrcRegNum) & 0xf;
00098   }
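  // Worked example: for XMM9, GetX86RegNum returns 1 and the extended-register
  // check adds 8, giving 9; inverting and masking yields (~9) & 0xf == 6, which
  // is the value that ends up in VEX.VVVV.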
00099 
00100   unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
00101                                              unsigned OpNum) const {
00102     assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
00103            "Invalid mask register as write-mask!");
00104     unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
00105     return MaskRegNum;
00106   }
00107 
00108   void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
00109     OS << (char)C;
00110     ++CurByte;
00111   }
00112 
00113   void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
00114                     raw_ostream &OS) const {
00115     // Output the constant in little endian byte order.
00116     for (unsigned i = 0; i != Size; ++i) {
00117       EmitByte(Val & 255, CurByte, OS);
00118       Val >>= 8;
00119     }
00120   }
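  // For example, EmitConstant(0x12345678, 4, CurByte, OS) writes the bytes
  // 0x78 0x56 0x34 0x12 in that order and advances CurByte by four.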
00121 
00122   void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
00123                      unsigned ImmSize, MCFixupKind FixupKind,
00124                      unsigned &CurByte, raw_ostream &OS,
00125                      SmallVectorImpl<MCFixup> &Fixups,
00126                      int ImmOffset = 0) const;
00127 
00128   inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
00129                                         unsigned RM) {
00130     assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
00131     return RM | (RegOpcode << 3) | (Mod << 6);
00132   }
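  // Illustration: ModRMByte(3, 2, 1) packs mod=0b11, reg/opcode=0b010 and
  // r/m=0b001 into 0b11010001 == 0xD1, i.e. a register-direct form with ECX in
  // the r/m field.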
00133 
00134   void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
00135                         unsigned &CurByte, raw_ostream &OS) const {
00136     EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
00137   }
00138 
00139   void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
00140                    unsigned &CurByte, raw_ostream &OS) const {
00141     // SIB byte is in the same format as the ModRMByte.
00142     EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
00143   }
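  // Illustration: for an address like [EAX + 4*ECX], EmitSIBByte(2, 1, 0)
  // emits 0b10001000 == 0x88 (scale=4, index=ECX, base=EAX).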
00144 
00145 
00146   void EmitMemModRMByte(const MCInst &MI, unsigned Op,
00147                         unsigned RegOpcodeField,
00148                         uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
00149                         SmallVectorImpl<MCFixup> &Fixups,
00150                         const MCSubtargetInfo &STI) const;
00151 
00152   void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
00153                          SmallVectorImpl<MCFixup> &Fixups,
00154                          const MCSubtargetInfo &STI) const override;
00155 
00156   void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
00157                            const MCInst &MI, const MCInstrDesc &Desc,
00158                            raw_ostream &OS) const;
00159 
00160   void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
00161                                  const MCInst &MI, raw_ostream &OS) const;
00162 
00163   void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
00164                         const MCInst &MI, const MCInstrDesc &Desc,
00165                         const MCSubtargetInfo &STI,
00166                         raw_ostream &OS) const;
00167 };
00168 
00169 } // end anonymous namespace
00170 
00171 
00172 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
00173                                             const MCRegisterInfo &MRI,
00174                                             const MCSubtargetInfo &STI,
00175                                             MCContext &Ctx) {
00176   return new X86MCCodeEmitter(MCII, Ctx);
00177 }
00178 
00179 /// isDisp8 - Return true if this signed displacement fits in an 8-bit
00180 /// sign-extended field.
00181 static bool isDisp8(int Value) {
00182   return Value == (signed char)Value;
00183 }
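// For instance, isDisp8(-128) and isDisp8(127) return true, while isDisp8(128)
// returns false and forces the 32-bit displacement form.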
00184 
00185 /// isCDisp8 - Return true if this signed displacement fits in an 8-bit
00186 /// compressed displacement field.
00187 static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
00188   assert(((TSFlags & X86II::EncodingMask) >>
00189           X86II::EncodingShift == X86II::EVEX) &&
00190          "Compressed 8-bit displacement is only valid for EVEX inst.");
00191 
00192   unsigned CD8_Scale =
00193     (TSFlags >> X86II::CD8_Scale_Shift) & X86II::CD8_Scale_Mask;
00194   if (CD8_Scale == 0) {
00195     CValue = Value;
00196     return isDisp8(Value);
00197   }
00198 
00199   unsigned Mask = CD8_Scale - 1;
00200   assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
00201   if (Value & Mask) // Unaligned offset
00202     return false;
00203   Value /= (int)CD8_Scale;
00204   bool Ret = (Value == (signed char)Value);
00205 
00206   if (Ret)
00207     CValue = Value;
00208   return Ret;
00209 }
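// Worked example: for a 64-byte memory object (CD8_Scale == 64), a displacement
// of 128 is aligned (128 & 63 == 0) and scales to 2, so CValue becomes 2 and a
// one-byte compressed displacement is used; a displacement of 100 is unaligned
// and falls back to disp32.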
00210 
00211 /// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
00212 /// in an instruction with the specified TSFlags.
00213 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
00214   unsigned Size = X86II::getSizeOfImm(TSFlags);
00215   bool isPCRel = X86II::isImmPCRel(TSFlags);
00216 
00217   if (X86II::isImmSigned(TSFlags)) {
00218     switch (Size) {
00219     default: llvm_unreachable("Unsupported signed fixup size!");
00220     case 4: return MCFixupKind(X86::reloc_signed_4byte);
00221     }
00222   }
00223   return MCFixup::getKindForSize(Size, isPCRel);
00224 }
00225 
00226 /// Is32BitMemOperand - Return true if the specified instruction has
00227 /// a 32-bit memory operand. Op specifies the operand # of the memoperand.
00228 static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
00229   const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
00230   const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
00231 
00232   if ((BaseReg.getReg() != 0 &&
00233        X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
00234       (IndexReg.getReg() != 0 &&
00235        X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
00236     return true;
00237   return false;
00238 }
00239 
00240 /// Is64BitMemOperand - Return true if the specified instruction has
00241 /// a 64-bit memory operand. Op specifies the operand # of the memoperand.
00242 #ifndef NDEBUG
00243 static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
00244   const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
00245   const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
00246 
00247   if ((BaseReg.getReg() != 0 &&
00248        X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
00249       (IndexReg.getReg() != 0 &&
00250        X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
00251     return true;
00252   return false;
00253 }
00254 #endif
00255 
00256 /// StartsWithGlobalOffsetTable - Check if this expression starts with
00257 ///  _GLOBAL_OFFSET_TABLE_ and if it is of the form
00258 ///  _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
00259 /// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple cases that
00260 /// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
00261 /// of a binary expression.
00262 enum GlobalOffsetTableExprKind {
00263   GOT_None,
00264   GOT_Normal,
00265   GOT_SymDiff
00266 };
00267 static GlobalOffsetTableExprKind
00268 StartsWithGlobalOffsetTable(const MCExpr *Expr) {
00269   const MCExpr *RHS = nullptr;
00270   if (Expr->getKind() == MCExpr::Binary) {
00271     const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
00272     Expr = BE->getLHS();
00273     RHS = BE->getRHS();
00274   }
00275 
00276   if (Expr->getKind() != MCExpr::SymbolRef)
00277     return GOT_None;
00278 
00279   const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
00280   const MCSymbol &S = Ref->getSymbol();
00281   if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
00282     return GOT_None;
00283   if (RHS && RHS->getKind() == MCExpr::SymbolRef)
00284     return GOT_SymDiff;
00285   return GOT_Normal;
00286 }
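// For example, the expression "_GLOBAL_OFFSET_TABLE_" by itself classifies as
// GOT_Normal, a difference such as "_GLOBAL_OFFSET_TABLE_-label" (for some
// symbol "label") classifies as GOT_SymDiff, and anything that does not start
// with that symbol is GOT_None.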
00287 
00288 static bool HasSecRelSymbolRef(const MCExpr *Expr) {
00289   if (Expr->getKind() == MCExpr::SymbolRef) {
00290     const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
00291     return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
00292   }
00293   return false;
00294 }
00295 
00296 void X86MCCodeEmitter::
00297 EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
00298               MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
00299               SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
00300   const MCExpr *Expr = nullptr;
00301   if (DispOp.isImm()) {
00302     // If this is a simple integer displacement that doesn't require a
00303     // relocation, emit it now.
00304     if (FixupKind != FK_PCRel_1 &&
00305         FixupKind != FK_PCRel_2 &&
00306         FixupKind != FK_PCRel_4) {
00307       EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
00308       return;
00309     }
00310     Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
00311   } else {
00312     Expr = DispOp.getExpr();
00313   }
00314 
00315   // If we have an immoffset, add it to the expression.
00316   if ((FixupKind == FK_Data_4 ||
00317        FixupKind == FK_Data_8 ||
00318        FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
00319     GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
00320     if (Kind != GOT_None) {
00321       assert(ImmOffset == 0);
00322 
00323       if (Size == 8) {
00324         FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
00325       } else {
00326         assert(Size == 4);
00327         FixupKind = MCFixupKind(X86::reloc_global_offset_table);
00328       }
00329 
00330       if (Kind == GOT_Normal)
00331         ImmOffset = CurByte;
00332     } else if (Expr->getKind() == MCExpr::SymbolRef) {
00333       if (HasSecRelSymbolRef(Expr)) {
00334         FixupKind = MCFixupKind(FK_SecRel_4);
00335       }
00336     } else if (Expr->getKind() == MCExpr::Binary) {
00337       const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
00338       if (HasSecRelSymbolRef(Bin->getLHS())
00339           || HasSecRelSymbolRef(Bin->getRHS())) {
00340         FixupKind = MCFixupKind(FK_SecRel_4);
00341       }
00342     }
00343   }
00344 
00345   // If the fixup is pc-relative, we need to bias the value to be relative to
00346   // the start of the field, not the end of the field.
00347   if (FixupKind == FK_PCRel_4 ||
00348       FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
00349       FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
00350     ImmOffset -= 4;
00351   if (FixupKind == FK_PCRel_2)
00352     ImmOffset -= 2;
00353   if (FixupKind == FK_PCRel_1)
00354     ImmOffset -= 1;
00355 
00356   if (ImmOffset)
00357     Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
00358                                    Ctx);
00359 
00360   // Emit a symbolic constant as a fixup and Size zero bytes.
00361   Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind, Loc));
00362   EmitConstant(0, Size, CurByte, OS);
00363 }
00364 
00365 void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
00366                                         unsigned RegOpcodeField,
00367                                         uint64_t TSFlags, unsigned &CurByte,
00368                                         raw_ostream &OS,
00369                                         SmallVectorImpl<MCFixup> &Fixups,
00370                                         const MCSubtargetInfo &STI) const{
00371   const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);
00372   const MCOperand &Base     = MI.getOperand(Op+X86::AddrBaseReg);
00373   const MCOperand &Scale    = MI.getOperand(Op+X86::AddrScaleAmt);
00374   const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
00375   unsigned BaseReg = Base.getReg();
00376   unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
00377                            X86II::EncodingShift;
00378   bool HasEVEX = (Encoding == X86II::EVEX);
00379 
00380   // Handle %rip relative addressing.
00381   if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
00382     assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
00383     assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
00384     EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
00385 
00386     unsigned FixupKind = X86::reloc_riprel_4byte;
00387 
00388     // movq loads are handled with a special relocation form which allows the
00389     // linker to eliminate some loads for GOT references which end up in the
00390     // same linkage unit.
00391     if (MI.getOpcode() == X86::MOV64rm)
00392       FixupKind = X86::reloc_riprel_4byte_movq_load;
00393 
00394     // rip-relative addressing is actually relative to the *next* instruction.
00395     // Since an immediate can follow the mod/rm byte for an instruction, this
00396     // means that we need to bias the immediate field of the instruction with
00397     // the size of the immediate field.  If we have this case, add it into the
00398     // expression to emit.
00399     int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
00400 
00401     EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
00402                   CurByte, OS, Fixups, -ImmSize);
00403     return;
00404   }
00405 
00406   unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
00407 
00408   // 16-bit addressing forms of the ModR/M byte have a different encoding for
00409   // the R/M field and are far more limited in which registers can be used.
00410   if (Is16BitMemOperand(MI, Op, STI)) {
00411     if (BaseReg) {
00412       // For 32-bit addressing, the row and column values in Table 2-2 are
00413       // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
00414       // some special cases. And GetX86RegNum reflects that numbering.
00415       // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
00416       // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
00417       // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
00418       // while values 0-3 indicate the allowed combinations (base+index) of
00419       // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
00420       //
00421       // R16Table[] is a lookup from the normal RegNo, to the row values from
00422       // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
00423       static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
00424       unsigned RMfield = R16Table[BaseRegNo];
00425 
00426       assert(RMfield && "invalid 16-bit base register");
00427 
00428       if (IndexReg.getReg()) {
00429         unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
00430 
00431         assert(IndexReg16 && "invalid 16-bit index register");
00432         // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
00433         assert(((IndexReg16 ^ RMfield) & 2) &&
00434                "invalid 16-bit base/index register combination");
00435         assert(Scale.getImm() == 1 &&
00436                "invalid scale for 16-bit memory reference");
00437 
00438         // Allow base/index to appear in either order (although GAS doesn't).
00439         if (IndexReg16 & 2)
00440           RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
00441         else
00442           RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
00443       }
00444 
00445       if (Disp.isImm() && isDisp8(Disp.getImm())) {
00446         if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
00447           // There is no displacement; just the register.
00448           EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
00449           return;
00450         }
00451         // Use the [REG]+disp8 form, including for [BP], which cannot be encoded without a displacement.
00452         EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
00453         EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
00454         return;
00455       }
00456       // This is the [REG]+disp16 case.
00457       EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
00458     } else {
00459       // There is no BaseReg; this is the plain [disp16] case.
00460       EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
00461     }
00462 
00463     // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
00464     EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
00465     return;
00466   }
00467 
00468   // Determine whether a SIB byte is needed.
00469   // If no BaseReg, issue a RIP relative instruction only if the MCE can
00470   // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
00471   // 2-7) and absolute references.
00472 
00473   if (// The SIB byte must be used if there is an index register.
00474       IndexReg.getReg() == 0 &&
00475       // The SIB byte must be used if the base is ESP/RSP/R12, all of which
00476       // encode to an R/M value of 4, which indicates that a SIB byte is
00477       // present.
00478       BaseRegNo != N86::ESP &&
00479       // If there is no base register and we're in 64-bit mode, we need a SIB
00480       // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
00481       (!is64BitMode(STI) || BaseReg != 0)) {
00482 
00483     if (BaseReg == 0) {          // [disp32]     in X86-32 mode
00484       EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
00485       EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
00486       return;
00487     }
00488 
00489     // If the base is not EBP/ESP and there is no displacement, use simple
00490     // indirect register encoding; this handles addresses like [EAX].  The
00491     // encoding for [EBP] with no displacement means [disp32] so we handle it
00492     // by emitting a displacement of 0 below.
00493     if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
00494       EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
00495       return;
00496     }
00497 
00498     // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
00499     if (Disp.isImm()) {
00500       if (!HasEVEX && isDisp8(Disp.getImm())) {
00501         EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
00502         EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
00503         return;
00504       }
00505       // Try EVEX compressed 8-bit displacement first; if failed, fall back to
00506       // 32-bit displacement.
00507       int CDisp8 = 0;
00508       if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
00509         EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
00510         EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
00511                       CDisp8 - Disp.getImm());
00512         return;
00513       }
00514     }
00515 
00516     // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
00517     EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
00518     EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
00519                   Fixups);
00520     return;
00521   }
00522 
00523   // We need a SIB byte, so start by outputting the ModR/M byte first
00524   assert(IndexReg.getReg() != X86::ESP &&
00525          IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
00526 
00527   bool ForceDisp32 = false;
00528   bool ForceDisp8  = false;
00529   int CDisp8 = 0;
00530   int ImmOffset = 0;
00531   if (BaseReg == 0) {
00532     // If there is no base register, we emit the special case SIB byte with
00533     // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
00534     EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
00535     ForceDisp32 = true;
00536   } else if (!Disp.isImm()) {
00537     // Emit the normal disp32 encoding.
00538     EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
00539     ForceDisp32 = true;
00540   } else if (Disp.getImm() == 0 &&
00541              // Base reg can't be anything that ends up with '5' as the base
00542              // reg, it is the magic [*] nomenclature that indicates no base.
00543              BaseRegNo != N86::EBP) {
00544     // Emit no displacement ModR/M byte
00545     EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
00546   } else if (!HasEVEX && isDisp8(Disp.getImm())) {
00547     // Emit the disp8 encoding.
00548     EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
00549     ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
00550   } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
00551     // Emit the disp8 encoding.
00552     EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
00553     ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
00554     ImmOffset = CDisp8 - Disp.getImm();
00555   } else {
00556     // Emit the normal disp32 encoding.
00557     EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
00558   }
00559 
00560   // Calculate what the SS field value should be...
00561   static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
00562   unsigned SS = SSTable[Scale.getImm()];
00563 
00564   if (BaseReg == 0) {
00565     // Handle the SIB byte for the case where there is no base, see Intel
00566     // Manual 2A, table 2-7. The displacement has already been output.
00567     unsigned IndexRegNo;
00568     if (IndexReg.getReg())
00569       IndexRegNo = GetX86RegNum(IndexReg);
00570     else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
00571       IndexRegNo = 4;
00572     EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
00573   } else {
00574     unsigned IndexRegNo;
00575     if (IndexReg.getReg())
00576       IndexRegNo = GetX86RegNum(IndexReg);
00577     else
00578       IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
00579     EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
00580   }
00581 
00582   // Do we need to output a displacement?
00583   if (ForceDisp8)
00584     EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
00585   else if (ForceDisp32 || Disp.getImm() != 0)
00586     EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
00587                   CurByte, OS, Fixups);
00588 }
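// End-to-end illustration: for "movl 8(%ebx), %eax" the base is EBX, there is
// no index register, and the displacement 8 fits in a disp8, so this routine
// emits ModRM 0x43 (mod=01, reg=EAX, r/m=EBX) followed by the byte 0x08; with
// the 0x8B opcode the complete encoding is 8B 43 08.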
00589 
00590 /// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
00591 /// called VEX.
00592 void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
00593                                            int MemOperand, const MCInst &MI,
00594                                            const MCInstrDesc &Desc,
00595                                            raw_ostream &OS) const {
00596   unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
00597                            X86II::EncodingShift;
00598   bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
00599   bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
00600   bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
00601   bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
00602   bool HasEVEX_RC = (TSFlags >> X86II::VEXShift) & X86II::EVEX_RC;
00603 
00604   // VEX_R: opcode extension equivalent to REX.R in
00605   // 1's complement (inverted) form
00606   //
00607   //  1: Same as REX.R=0 (must be 1 in 32-bit mode)
00608   //  0: Same as REX.R=1 (64-bit mode only)
00609   //
00610   unsigned char VEX_R = 0x1;
00611   unsigned char EVEX_R2 = 0x1;
00612 
00613   // VEX_X: equivalent to REX.X, only used when a
00614   // register is used for index in SIB Byte.
00615   //
00616   //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
00617   //  0: Same as REX.X=1 (64-bit mode only)
00618   unsigned char VEX_X = 0x1;
00619 
00620   // VEX_B:
00621   //
00622   //  1: Same as REX.B=0 (ignored in 32-bit mode)
00623   //  0: Same as REX.B=1 (64-bit mode only)
00624   //
00625   unsigned char VEX_B = 0x1;
00626 
00627   // VEX_W: opcode specific (use like REX.W, or used for
00628   // opcode extension, or ignored, depending on the opcode byte)
00629   unsigned char VEX_W = 0;
00630 
00631   // VEX_5M (VEX m-mmmmm field):
00632   //
00633   //  0b00000: Reserved for future use
00634   //  0b00001: implied 0F leading opcode
00635   //  0b00010: implied 0F 38 leading opcode bytes
00636   //  0b00011: implied 0F 3A leading opcode bytes
00637   //  0b00100-0b00111: Reserved for future use
00638   //  0b01000: XOP map select - 08h instructions with imm byte
00639   //  0b01001: XOP map select - 09h instructions with no imm byte
00640   //  0b01010: XOP map select - 0Ah instructions with imm dword
00641   unsigned char VEX_5M = 0;
00642 
00643   // VEX_4V (VEX vvvv field): a register specifier
00644   // (in 1's complement form) or 1111 if unused.
00645   unsigned char VEX_4V = 0xf;
00646   unsigned char EVEX_V2 = 0x1;
00647 
00648   // VEX_L (Vector Length):
00649   //
00650   //  0: scalar or 128-bit vector
00651   //  1: 256-bit vector
00652   //
00653   unsigned char VEX_L = 0;
00654   unsigned char EVEX_L2 = 0;
00655 
00656   // VEX_PP: opcode extension providing equivalent
00657   // functionality of a SIMD prefix
00658   //
00659   //  0b00: None
00660   //  0b01: 66
00661   //  0b10: F3
00662   //  0b11: F2
00663   //
00664   unsigned char VEX_PP = 0;
00665 
00666   // EVEX_U
00667   unsigned char EVEX_U = 1; // Always '1' so far
00668 
00669   // EVEX_z
00670   unsigned char EVEX_z = 0;
00671 
00672   // EVEX_b
00673   unsigned char EVEX_b = 0;
00674 
00675   // EVEX_rc
00676   unsigned char EVEX_rc = 0;
00677 
00678   // EVEX_aaa
00679   unsigned char EVEX_aaa = 0;
00680 
00681   bool EncodeRC = false;
00682 
00683   if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
00684     VEX_W = 1;
00685 
00686   if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
00687     VEX_L = 1;
00688   if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
00689     EVEX_L2 = 1;
00690 
00691   if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z))
00692     EVEX_z = 1;
00693 
00694   if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
00695     EVEX_b = 1;
00696 
00697   switch (TSFlags & X86II::OpPrefixMask) {
00698   default: break; // VEX_PP already correct
00699   case X86II::PD: VEX_PP = 0x1; break; // 66
00700   case X86II::XS: VEX_PP = 0x2; break; // F3
00701   case X86II::XD: VEX_PP = 0x3; break; // F2
00702   }
00703 
00704   switch (TSFlags & X86II::OpMapMask) {
00705   default: llvm_unreachable("Invalid prefix!");
00706   case X86II::TB:   VEX_5M = 0x1; break; // 0F
00707   case X86II::T8:   VEX_5M = 0x2; break; // 0F 38
00708   case X86II::TA:   VEX_5M = 0x3; break; // 0F 3A
00709   case X86II::XOP8: VEX_5M = 0x8; break;
00710   case X86II::XOP9: VEX_5M = 0x9; break;
00711   case X86II::XOPA: VEX_5M = 0xA; break;
00712   }
00713 
00714   // Classify VEX_B, VEX_4V, VEX_R, VEX_X
00715   unsigned NumOps = Desc.getNumOperands();
00716   unsigned CurOp = X86II::getOperandBias(Desc);
00717 
00718   switch (TSFlags & X86II::FormMask) {
00719   default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
00720   case X86II::RawFrm:
00721     break;
00722   case X86II::MRMDestMem: {
00723     // MRMDestMem instruction forms:
00724     //  MemAddr, src1(ModR/M)
00725     //  MemAddr, src1(VEX_4V), src2(ModR/M)
00726     //  MemAddr, src1(ModR/M), imm8
00727     //
00728     if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand + 
00729                                                  X86::AddrBaseReg).getReg()))
00730       VEX_B = 0x0;
00731     if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
00732                                                  X86::AddrIndexReg).getReg()))
00733       VEX_X = 0x0;
00734     if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
00735                                           X86::AddrIndexReg).getReg()))
00736       EVEX_V2 = 0x0;
00737 
00738     CurOp += X86::AddrNumOperands;
00739 
00740     if (HasEVEX_K)
00741       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
00742 
00743     if (HasVEX_4V) {
00744       VEX_4V = getVEXRegisterEncoding(MI, CurOp);
00745       if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00746         EVEX_V2 = 0x0;
00747       CurOp++;
00748     }
00749 
00750     const MCOperand &MO = MI.getOperand(CurOp);
00751     if (MO.isReg()) {
00752       if (X86II::isX86_64ExtendedReg(MO.getReg()))
00753         VEX_R = 0x0;
00754       if (X86II::is32ExtendedReg(MO.getReg()))
00755         EVEX_R2 = 0x0;
00756     }
00757     break;
00758   }
00759   case X86II::MRMSrcMem:
00760     // MRMSrcMem instruction forms:
00761     //  src1(ModR/M), MemAddr
00762     //  src1(ModR/M), src2(VEX_4V), MemAddr
00763     //  src1(ModR/M), MemAddr, imm8
00764     //  src1(ModR/M), MemAddr, src2(VEX_I8IMM)
00765     //
00766     //  FMA4:
00767     //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
00768     //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
00769     if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
00770       VEX_R = 0x0;
00771     if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00772       EVEX_R2 = 0x0;
00773     CurOp++;
00774 
00775     if (HasEVEX_K)
00776       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
00777 
00778     if (HasVEX_4V) {
00779       VEX_4V = getVEXRegisterEncoding(MI, CurOp);
00780       if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00781         EVEX_V2 = 0x0;
00782       CurOp++;
00783     }
00784 
00785     if (X86II::isX86_64ExtendedReg(
00786                MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
00787       VEX_B = 0x0;
00788     if (X86II::isX86_64ExtendedReg(
00789                MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
00790       VEX_X = 0x0;
00791     if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
00792                                X86::AddrIndexReg).getReg()))
00793       EVEX_V2 = 0x0;
00794 
00795     if (HasVEX_4VOp3)
00796       // Instruction format for 4VOp3:
00797       //   src1(ModR/M), MemAddr, src3(VEX_4V)
00798       // CurOp points to start of the MemoryOperand,
00799       //   it skips TIED_TO operands if they exist, then increments past src1.
00800       // CurOp + X86::AddrNumOperands will point to src3.
00801       VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
00802     break;
00803   case X86II::MRM0m: case X86II::MRM1m:
00804   case X86II::MRM2m: case X86II::MRM3m:
00805   case X86II::MRM4m: case X86II::MRM5m:
00806   case X86II::MRM6m: case X86II::MRM7m: {
00807     // MRM0m-MRM7m instruction forms:
00808     //  MemAddr
00809     //  src1(VEX_4V), MemAddr
00810     if (HasVEX_4V) {
00811       VEX_4V = getVEXRegisterEncoding(MI, CurOp);
00812       if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00813         EVEX_V2 = 0x0;
00814       CurOp++;
00815     }
00816 
00817     if (HasEVEX_K)
00818       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
00819 
00820     if (X86II::isX86_64ExtendedReg(
00821                MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
00822       VEX_B = 0x0;
00823     if (X86II::isX86_64ExtendedReg(
00824                MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
00825       VEX_X = 0x0;
00826     break;
00827   }
00828   case X86II::MRMSrcReg:
00829     // MRMSrcReg instruction forms:
00830     //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
00831     //  dst(ModR/M), src1(ModR/M)
00832     //  dst(ModR/M), src1(ModR/M), imm8
00833     //
00834     //  FMA4:
00835     //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
00836     //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
00837     if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
00838       VEX_R = 0x0;
00839     if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00840       EVEX_R2 = 0x0;
00841     CurOp++;
00842 
00843     if (HasEVEX_K)
00844       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
00845 
00846     if (HasVEX_4V) {
00847       VEX_4V = getVEXRegisterEncoding(MI, CurOp);
00848       if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00849         EVEX_V2 = 0x0;
00850       CurOp++;
00851     }
00852 
00853     if (HasMemOp4) // Skip second register source (encoded in I8IMM)
00854       CurOp++;
00855 
00856     if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
00857       VEX_B = 0x0;
00858     if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00859       VEX_X = 0x0;
00860     CurOp++;
00861     if (HasVEX_4VOp3)
00862       VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
00863     if (EVEX_b) {
00864       if (HasEVEX_RC) {
00865         unsigned RcOperand = NumOps-1;
00866         assert(RcOperand >= CurOp);
00867         EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
00868       }
00869       EncodeRC = true;
00870     }      
00871     break;
00872   case X86II::MRMDestReg:
00873     // MRMDestReg instruction forms:
00874     //  dst(ModR/M), src(ModR/M)
00875     //  dst(ModR/M), src(ModR/M), imm8
00876     //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
00877     if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
00878       VEX_B = 0x0;
00879     if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00880       VEX_X = 0x0;
00881     CurOp++;
00882 
00883     if (HasEVEX_K)
00884       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
00885 
00886     if (HasVEX_4V) {
00887       VEX_4V = getVEXRegisterEncoding(MI, CurOp);
00888       if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00889         EVEX_V2 = 0x0;
00890       CurOp++;
00891     }
00892 
00893     if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
00894       VEX_R = 0x0;
00895     if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00896       EVEX_R2 = 0x0;
00897     if (EVEX_b)
00898       EncodeRC = true;
00899     break;
00900   case X86II::MRM0r: case X86II::MRM1r:
00901   case X86II::MRM2r: case X86II::MRM3r:
00902   case X86II::MRM4r: case X86II::MRM5r:
00903   case X86II::MRM6r: case X86II::MRM7r:
00904     // MRM0r-MRM7r instruction forms:
00905     //  dst(VEX_4V), src(ModR/M), imm8
00906     if (HasVEX_4V) {
00907       VEX_4V = getVEXRegisterEncoding(MI, CurOp);
00908       if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00909           EVEX_V2 = 0x0;
00910       CurOp++;
00911     }
00912     if (HasEVEX_K)
00913       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
00914 
00915     if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
00916       VEX_B = 0x0;
00917     if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
00918       VEX_X = 0x0;
00919     break;
00920   }
00921 
00922   if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
00923     // VEX opcode prefix can have 2 or 3 bytes
00924     //
00925     //  3 bytes:
00926     //    +-----+ +--------------+ +-------------------+
00927     //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
00928     //    +-----+ +--------------+ +-------------------+
00929     //  2 bytes:
00930     //    +-----+ +-------------------+
00931     //    | C5h | | R | vvvv | L | pp |
00932     //    +-----+ +-------------------+
00933     //
00934     //  XOP uses a similar prefix:
00935     //    +-----+ +--------------+ +-------------------+
00936     //    | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
00937     //    +-----+ +--------------+ +-------------------+
00938     unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
00939 
00940     // Can we use the 2 byte VEX prefix?
00941     if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
00942       EmitByte(0xC5, CurByte, OS);
00943       EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
00944       return;
00945     }
00946 
00947     // 3 byte VEX prefix
00948     EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
00949     EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
00950     EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
00951   } else {
00952     assert(Encoding == X86II::EVEX && "unknown encoding!");
00953     // The EVEX opcode prefix is always 4 bytes
00954     //
00955     // +-----+ +--------------+ +-------------------+ +------------------------+
00956     // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
00957     // +-----+ +--------------+ +-------------------+ +------------------------+
00958     assert((VEX_5M & 0x3) == VEX_5M
00959            && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
00960 
00961     VEX_5M &= 0x3;
00962 
00963     EmitByte(0x62, CurByte, OS);
00964     EmitByte((VEX_R   << 7) |
00965              (VEX_X   << 6) |
00966              (VEX_B   << 5) |
00967              (EVEX_R2 << 4) |
00968              VEX_5M, CurByte, OS);
00969     EmitByte((VEX_W   << 7) |
00970              (VEX_4V  << 3) |
00971              (EVEX_U  << 2) |
00972              VEX_PP, CurByte, OS);
00973     if (EncodeRC)
00974       EmitByte((EVEX_z  << 7) |
00975               (EVEX_rc << 5) |
00976               (EVEX_b  << 4) |
00977               (EVEX_V2 << 3) |
00978               EVEX_aaa, CurByte, OS);
00979     else
00980       EmitByte((EVEX_z  << 7) |
00981               (EVEX_L2 << 6) |
00982               (VEX_L   << 5) |
00983               (EVEX_b  << 4) |
00984               (EVEX_V2 << 3) |
00985               EVEX_aaa, CurByte, OS);
00986   }
00987 }
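// Concrete example: for "vaddps %xmm2, %xmm1, %xmm0" no extended registers are
// used, VEX_W is 0 and VEX_5M is 1, so the two-byte form applies: 0xC5 followed
// by (VEX_R << 7) | (VEX_4V << 3) | (VEX_L << 2) | VEX_PP == 0x80 | 0x70 == 0xF0,
// giving the full encoding C5 F0 58 C2.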
00988 
00989 /// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
00990 /// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
00991 /// size, and 3) use of X86-64 extended registers.
00992 static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
00993                                    const MCInstrDesc &Desc) {
00994   unsigned REX = 0;
00995   if (TSFlags & X86II::REX_W)
00996     REX |= 1 << 3; // set REX.W
00997 
00998   if (MI.getNumOperands() == 0) return REX;
00999 
01000   unsigned NumOps = MI.getNumOperands();
01001   // FIXME: MCInst should explicitize the two-addrness.
01002   bool isTwoAddr = NumOps > 1 &&
01003                       Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
01004 
01005   // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
01006   unsigned i = isTwoAddr ? 1 : 0;
01007   for (; i != NumOps; ++i) {
01008     const MCOperand &MO = MI.getOperand(i);
01009     if (!MO.isReg()) continue;
01010     unsigned Reg = MO.getReg();
01011     if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
01012     // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
01013     // that returns non-zero.
01014     REX |= 0x40; // REX fixed encoding prefix
01015     break;
01016   }
01017 
01018   switch (TSFlags & X86II::FormMask) {
01019   case X86II::MRMSrcReg:
01020     if (MI.getOperand(0).isReg() &&
01021         X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
01022       REX |= 1 << 2; // set REX.R
01023     i = isTwoAddr ? 2 : 1;
01024     for (; i != NumOps; ++i) {
01025       const MCOperand &MO = MI.getOperand(i);
01026       if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
01027         REX |= 1 << 0; // set REX.B
01028     }
01029     break;
01030   case X86II::MRMSrcMem: {
01031     if (MI.getOperand(0).isReg() &&
01032         X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
01033       REX |= 1 << 2; // set REX.R
01034     unsigned Bit = 0;
01035     i = isTwoAddr ? 2 : 1;
01036     for (; i != NumOps; ++i) {
01037       const MCOperand &MO = MI.getOperand(i);
01038       if (MO.isReg()) {
01039         if (X86II::isX86_64ExtendedReg(MO.getReg()))
01040           REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
01041         Bit++;
01042       }
01043     }
01044     break;
01045   }
01046   case X86II::MRMXm:
01047   case X86II::MRM0m: case X86II::MRM1m:
01048   case X86II::MRM2m: case X86II::MRM3m:
01049   case X86II::MRM4m: case X86II::MRM5m:
01050   case X86II::MRM6m: case X86II::MRM7m:
01051   case X86II::MRMDestMem: {
01052     unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
01053     i = isTwoAddr ? 1 : 0;
01054     if (NumOps > e && MI.getOperand(e).isReg() &&
01055         X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
01056       REX |= 1 << 2; // set REX.R
01057     unsigned Bit = 0;
01058     for (; i != e; ++i) {
01059       const MCOperand &MO = MI.getOperand(i);
01060       if (MO.isReg()) {
01061         if (X86II::isX86_64ExtendedReg(MO.getReg()))
01062           REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
01063         Bit++;
01064       }
01065     }
01066     break;
01067   }
01068   default:
01069     if (MI.getOperand(0).isReg() &&
01070         X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
01071       REX |= 1 << 0; // set REX.B
01072     i = isTwoAddr ? 2 : 1;
01073     for (unsigned e = NumOps; i != e; ++i) {
01074       const MCOperand &MO = MI.getOperand(i);
01075       if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
01076         REX |= 1 << 2; // set REX.R
01077     }
01078     break;
01079   }
01080   return REX;
01081 }
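// For instance, "movq %rbx, %rax" only needs REX.W, so this returns 0x8 and the
// caller emits 0x40 | 0x8 == 0x48, producing 48 89 D8; the same move with %r8 as
// the source additionally sets REX.R, producing 4C 89 C0.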
01082 
01083 /// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
01084 void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
01085                                                  unsigned SegOperand,
01086                                                  const MCInst &MI,
01087                                                  raw_ostream &OS) const {
01088   // Check for explicit segment override on memory operand.
01089   switch (MI.getOperand(SegOperand).getReg()) {
01090   default: llvm_unreachable("Unknown segment register!");
01091   case 0: break;
01092   case X86::CS: EmitByte(0x2E, CurByte, OS); break;
01093   case X86::SS: EmitByte(0x36, CurByte, OS); break;
01094   case X86::DS: EmitByte(0x3E, CurByte, OS); break;
01095   case X86::ES: EmitByte(0x26, CurByte, OS); break;
01096   case X86::FS: EmitByte(0x64, CurByte, OS); break;
01097   case X86::GS: EmitByte(0x65, CurByte, OS); break;
01098   }
01099 }
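// For example, a memory operand that names %fs as its segment register causes a
// single 0x64 byte to be emitted here; an operand with no explicit segment
// (register 0) emits nothing.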
01100 
01101 /// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
01102 ///
01103 /// MemOperand is the operand # of the start of a memory operand if present.  If
01104 /// not present, it is -1.
01105 void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
01106                                         int MemOperand, const MCInst &MI,
01107                                         const MCInstrDesc &Desc,
01108                                         const MCSubtargetInfo &STI,
01109                                         raw_ostream &OS) const {
01110 
01111   // Emit the operand size opcode prefix as needed.
01112   unsigned char OpSize = (TSFlags & X86II::OpSizeMask) >> X86II::OpSizeShift;
01113   if (OpSize == (is16BitMode(STI) ? X86II::OpSize32 : X86II::OpSize16))
01114     EmitByte(0x66, CurByte, OS);
01115 
01116   switch (TSFlags & X86II::OpPrefixMask) {
01117   case X86II::PD:   // 66
01118     EmitByte(0x66, CurByte, OS);
01119     break;
01120   case X86II::XS:   // F3
01121     EmitByte(0xF3, CurByte, OS);
01122     break;
01123   case X86II::XD:   // F2
01124     EmitByte(0xF2, CurByte, OS);
01125     break;
01126   }
01127 
01128   // Handle REX prefix.
01129   // FIXME: Can this come before F2 etc to simplify emission?
01130   if (is64BitMode(STI)) {
01131     if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
01132       EmitByte(0x40 | REX, CurByte, OS);
01133   }
01134 
01135   // 0x0F escape code must be emitted just before the opcode.
01136   switch (TSFlags & X86II::OpMapMask) {
01137   case X86II::TB:  // Two-byte opcode map
01138   case X86II::T8:  // 0F 38
01139   case X86II::TA:  // 0F 3A
01140     EmitByte(0x0F, CurByte, OS);
01141     break;
01142   }
01143 
01144   switch (TSFlags & X86II::OpMapMask) {
01145   case X86II::T8:    // 0F 38
01146     EmitByte(0x38, CurByte, OS);
01147     break;
01148   case X86II::TA:    // 0F 3A
01149     EmitByte(0x3A, CurByte, OS);
01150     break;
01151   }
01152 }
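// Prefix illustration: encoding "movw %bx, %ax" in 32-bit mode emits the 0x66
// operand-size override here, and the 0x89 opcode then follows from
// EncodeInstruction, giving 66 89 D8; in 16-bit mode no 0x66 is needed for the
// same instruction.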
01153 
01154 void X86MCCodeEmitter::
01155 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
01156                   SmallVectorImpl<MCFixup> &Fixups,
01157                   const MCSubtargetInfo &STI) const {
01158   unsigned Opcode = MI.getOpcode();
01159   const MCInstrDesc &Desc = MCII.get(Opcode);
01160   uint64_t TSFlags = Desc.TSFlags;
01161 
01162   // Pseudo instructions don't get encoded.
01163   if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
01164     return;
01165 
01166   unsigned NumOps = Desc.getNumOperands();
01167   unsigned CurOp = X86II::getOperandBias(Desc);
01168 
01169   // Keep track of the current byte being emitted.
01170   unsigned CurByte = 0;
01171 
01172   // Encoding type for this instruction.
01173   unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
01174                            X86II::EncodingShift;
01175 
01176   // Does it use the VEX.VVVV field?
01177   bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
01178   bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
01179   bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
01180   const unsigned MemOp4_I8IMMOperand = 2;
01181 
01182   // Does it use the EVEX.aaa field?
01183   bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
01184   bool HasEVEX_RC = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_RC);
01185   
01186   // Determine where the memory operand starts, if present.
01187   int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
01188   if (MemoryOperand != -1) MemoryOperand += CurOp;
01189 
01190   // Emit the lock opcode prefix as needed.
01191   if (TSFlags & X86II::LOCK)
01192     EmitByte(0xF0, CurByte, OS);
01193 
01194   // Emit segment override opcode prefix as needed.
01195   if (MemoryOperand >= 0)
01196     EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
01197                               MI, OS);
01198 
01199   // Emit the repeat opcode prefix as needed.
01200   if (TSFlags & X86II::REP)
01201     EmitByte(0xF3, CurByte, OS);
01202 
01203   // Emit the address size opcode prefix as needed.
01204   bool need_address_override;
01205   // The AdSize prefix is only for 32-bit and 64-bit modes. Hm, perhaps we
01206   // should introduce an AdSize16 bit instead of having seven special cases?
01207   if ((!is16BitMode(STI) && TSFlags & X86II::AdSize) ||
01208       (is16BitMode(STI) && (MI.getOpcode() == X86::JECXZ_32 ||
01209                          MI.getOpcode() == X86::MOV8o8a ||
01210                          MI.getOpcode() == X86::MOV16o16a ||
01211                          MI.getOpcode() == X86::MOV32o32a ||
01212                          MI.getOpcode() == X86::MOV8ao8 ||
01213                          MI.getOpcode() == X86::MOV16ao16 ||
01214                          MI.getOpcode() == X86::MOV32ao32))) {
01215     need_address_override = true;
01216   } else if (MemoryOperand < 0) {
01217     need_address_override = false;
01218   } else if (is64BitMode(STI)) {
01219     assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
01220     need_address_override = Is32BitMemOperand(MI, MemoryOperand);
01221   } else if (is32BitMode(STI)) {
01222     assert(!Is64BitMemOperand(MI, MemoryOperand));
01223     need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
01224   } else {
01225     assert(is16BitMode(STI));
01226     assert(!Is64BitMemOperand(MI, MemoryOperand));
01227     need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
01228   }
01229 
01230   if (need_address_override)
01231     EmitByte(0x67, CurByte, OS);
01232 
01233   if (Encoding == 0)
01234     EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
01235   else
01236     EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
01237 
01238   unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
01239 
01240   if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
01241     BaseOpcode = 0x0F;   // Weird 3DNow! encoding.
01242 
01243   unsigned SrcRegNum = 0;
01244   switch (TSFlags & X86II::FormMask) {
01245   default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
01246     llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
01247   case X86II::Pseudo:
01248     llvm_unreachable("Pseudo instruction shouldn't be emitted");
01249   case X86II::RawFrmDstSrc: {
01250     unsigned siReg = MI.getOperand(1).getReg();
01251     assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
01252             (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
01253             (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
01254            "SI and DI register sizes do not match");
01255     // Emit segment override opcode prefix as needed (not for %ds).
01256     if (MI.getOperand(2).getReg() != X86::DS)
01257       EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
01258     // Emit AdSize prefix as needed.
01259     if ((!is32BitMode(STI) && siReg == X86::ESI) ||
01260         (is32BitMode(STI) && siReg == X86::SI))
01261       EmitByte(0x67, CurByte, OS);
01262     CurOp += 3; // Consume operands.
01263     EmitByte(BaseOpcode, CurByte, OS);
01264     break;
01265   }
01266   case X86II::RawFrmSrc: {
01267     unsigned siReg = MI.getOperand(0).getReg();
01268     // Emit segment override opcode prefix as needed (not for %ds).
01269     if (MI.getOperand(1).getReg() != X86::DS)
01270       EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
01271     // Emit AdSize prefix as needed.
01272     if ((!is32BitMode(STI) && siReg == X86::ESI) ||
01273         (is32BitMode(STI) && siReg == X86::SI))
01274       EmitByte(0x67, CurByte, OS);
01275     CurOp += 2; // Consume operands.
01276     EmitByte(BaseOpcode, CurByte, OS);
01277     break;
01278   }
01279   case X86II::RawFrmDst: {
01280     unsigned siReg = MI.getOperand(0).getReg();
01281     // Emit AdSize prefix as needed.
01282     if ((!is32BitMode(STI) && siReg == X86::EDI) ||
01283         (is32BitMode(STI) && siReg == X86::DI))
01284       EmitByte(0x67, CurByte, OS);
01285     ++CurOp; // Consume operand.
01286     EmitByte(BaseOpcode, CurByte, OS);
01287     break;
01288   }
01289   case X86II::RawFrm:
01290     EmitByte(BaseOpcode, CurByte, OS);
01291     break;
01292   case X86II::RawFrmMemOffs:
01293     // Emit segment override opcode prefix as needed.
01294     EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
01295     EmitByte(BaseOpcode, CurByte, OS);
01296     EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
01297                   X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
01298                   CurByte, OS, Fixups);
01299     ++CurOp; // skip segment operand
01300     break;
01301   case X86II::RawFrmImm8:
01302     EmitByte(BaseOpcode, CurByte, OS);
01303     EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
01304                   X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
01305                   CurByte, OS, Fixups);
01306     EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
01307                   OS, Fixups);
01308     break;
01309   case X86II::RawFrmImm16:
01310     EmitByte(BaseOpcode, CurByte, OS);
01311     EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
01312                   X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
01313                   CurByte, OS, Fixups);
01314     EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
01315                   OS, Fixups);
01316     break;
01317 
01318   case X86II::AddRegFrm:
01319     EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
01320     break;
01321 
01322   case X86II::MRMDestReg:
01323     EmitByte(BaseOpcode, CurByte, OS);
01324     SrcRegNum = CurOp + 1;
01325 
01326     if (HasEVEX_K) // Skip writemask
01327       SrcRegNum++;
01328 
01329     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
01330       ++SrcRegNum;
01331 
01332     EmitRegModRMByte(MI.getOperand(CurOp),
01333                      GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
01334     CurOp = SrcRegNum + 1;
01335     break;
01336 
01337   case X86II::MRMDestMem:
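          // Memory destination: the address operands start at CurOp and the
          // register source following them is encoded in ModRM.reg.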
01338     EmitByte(BaseOpcode, CurByte, OS);
01339     SrcRegNum = CurOp + X86::AddrNumOperands;
01340 
01341     if (HasEVEX_K) // Skip writemask
01342       SrcRegNum++;
01343 
01344     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
01345       ++SrcRegNum;
01346 
01347     EmitMemModRMByte(MI, CurOp,
01348                      GetX86RegNum(MI.getOperand(SrcRegNum)),
01349                      TSFlags, CurByte, OS, Fixups, STI);
01350     CurOp = SrcRegNum + 1;
01351     break;
01352 
01353   case X86II::MRMSrcReg:
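          // Register destination in ModRM.reg, last register source in
          // ModRM.rm.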
01354     EmitByte(BaseOpcode, CurByte, OS);
01355     SrcRegNum = CurOp + 1;
01356 
01357     if (HasEVEX_K) // Skip writemask
01358       SrcRegNum++;
01359 
01360     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
01361       ++SrcRegNum;
01362 
01363     if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
01364       ++SrcRegNum;
01365 
01366     EmitRegModRMByte(MI.getOperand(SrcRegNum),
01367                      GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
01368 
01369     // Two operands were skipped when HasMemOp4 is set; compensate accordingly.
01370     CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
01371     if (HasVEX_4VOp3)
01372       ++CurOp;
01373     // Do not count the rounding control operand.
01374     if (HasEVEX_RC)
01375       NumOps--;
01376     break;
01377 
01378   case X86II::MRMSrcMem: {
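          // Register destination in ModRM.reg; the memory source is encoded
          // through ModRM.rm plus SIB/displacement as needed.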
01379     int AddrOperands = X86::AddrNumOperands;
01380     unsigned FirstMemOp = CurOp+1;
01381 
01382     if (HasEVEX_K) { // Skip writemask
01383       ++AddrOperands;
01384       ++FirstMemOp;
01385     }
01386 
01387     if (HasVEX_4V) {
01388       ++AddrOperands;
01389       ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
01390     }
01391     if (HasMemOp4) // Skip second register source (encoded in I8IMM)
01392       ++FirstMemOp;
01393 
01394     EmitByte(BaseOpcode, CurByte, OS);
01395 
01396     EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
01397                      TSFlags, CurByte, OS, Fixups, STI);
01398     CurOp += AddrOperands + 1;
01399     if (HasVEX_4VOp3)
01400       ++CurOp;
01401     break;
01402   }
01403 
01404   case X86II::MRMXr:
01405   case X86II::MRM0r: case X86II::MRM1r:
01406   case X86II::MRM2r: case X86II::MRM3r:
01407   case X86II::MRM4r: case X86II::MRM5r:
01408   case X86II::MRM6r: case X86II::MRM7r: {
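          // Single register operand in ModRM.rm. For MRM0r-MRM7r the
          // ModRM.reg field carries the /0../7 opcode extension; MRMXr leaves
          // it as zero.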
01409     if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
01410       ++CurOp;
01411     if (HasEVEX_K) // Skip writemask
01412       ++CurOp;
01413     EmitByte(BaseOpcode, CurByte, OS);
01414     uint64_t Form = TSFlags & X86II::FormMask;
01415     EmitRegModRMByte(MI.getOperand(CurOp++),
01416                      (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
01417                      CurByte, OS);
01418     break;
01419   }
01420 
01421   case X86II::MRMXm:
01422   case X86II::MRM0m: case X86II::MRM1m:
01423   case X86II::MRM2m: case X86II::MRM3m:
01424   case X86II::MRM4m: case X86II::MRM5m:
01425   case X86II::MRM6m: case X86II::MRM7m: {
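          // Single memory operand encoded through ModRM.rm (plus SIB and
          // displacement). For MRM0m-MRM7m the ModRM.reg field carries the
          // /0../7 opcode extension; MRMXm leaves it as zero.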
01426     if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
01427       ++CurOp;
01428     if (HasEVEX_K) // Skip writemask
01429       ++CurOp;
01430     EmitByte(BaseOpcode, CurByte, OS);
01431     uint64_t Form = TSFlags & X86II::FormMask;
01432     EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m,
01433                      TSFlags, CurByte, OS, Fixups, STI);
01434     CurOp += X86::AddrNumOperands;
01435     break;
01436   }
01437   case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
01438   case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
01439   case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
01440   case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
01441   case X86II::MRM_D4: case X86II::MRM_D5: case X86II::MRM_D6:
01442   case X86II::MRM_D7: case X86II::MRM_D8: case X86II::MRM_D9:
01443   case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC:
01444   case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF:
01445   case X86II::MRM_E0: case X86II::MRM_E1: case X86II::MRM_E2:
01446   case X86II::MRM_E3: case X86II::MRM_E4: case X86II::MRM_E5:
01447   case X86II::MRM_E8: case X86II::MRM_E9: case X86II::MRM_EA:
01448   case X86II::MRM_EB: case X86II::MRM_EC: case X86II::MRM_ED:
01449   case X86II::MRM_EE: case X86II::MRM_F0: case X86II::MRM_F1:
01450   case X86II::MRM_F2: case X86II::MRM_F3: case X86II::MRM_F4:
01451   case X86II::MRM_F5: case X86II::MRM_F6: case X86II::MRM_F7:
01452   case X86II::MRM_F8: case X86II::MRM_F9: case X86II::MRM_FA:
01453   case X86II::MRM_FB: case X86II::MRM_FC: case X86II::MRM_FD:
01454   case X86II::MRM_FE: case X86II::MRM_FF:
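          // Instructions whose ModRM byte is a fixed value with no register
          // or memory operand, e.g. MONITOR (0F 01 C8) and MWAIT (0F 01 C9).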
01455     EmitByte(BaseOpcode, CurByte, OS);
01456 
01457     unsigned char MRM;
01458     switch (TSFlags & X86II::FormMask) {
01459     default: llvm_unreachable("Invalid Form");
01460     case X86II::MRM_C0: MRM = 0xC0; break;
01461     case X86II::MRM_C1: MRM = 0xC1; break;
01462     case X86II::MRM_C2: MRM = 0xC2; break;
01463     case X86II::MRM_C3: MRM = 0xC3; break;
01464     case X86II::MRM_C4: MRM = 0xC4; break;
01465     case X86II::MRM_C8: MRM = 0xC8; break;
01466     case X86II::MRM_C9: MRM = 0xC9; break;
01467     case X86II::MRM_CA: MRM = 0xCA; break;
01468     case X86II::MRM_CB: MRM = 0xCB; break;
01469     case X86II::MRM_CF: MRM = 0xCF; break;
01470     case X86II::MRM_D0: MRM = 0xD0; break;
01471     case X86II::MRM_D1: MRM = 0xD1; break;
01472     case X86II::MRM_D4: MRM = 0xD4; break;
01473     case X86II::MRM_D5: MRM = 0xD5; break;
01474     case X86II::MRM_D6: MRM = 0xD6; break;
01475     case X86II::MRM_D7: MRM = 0xD7; break;
01476     case X86II::MRM_D8: MRM = 0xD8; break;
01477     case X86II::MRM_D9: MRM = 0xD9; break;
01478     case X86II::MRM_DA: MRM = 0xDA; break;
01479     case X86II::MRM_DB: MRM = 0xDB; break;
01480     case X86II::MRM_DC: MRM = 0xDC; break;
01481     case X86II::MRM_DD: MRM = 0xDD; break;
01482     case X86II::MRM_DE: MRM = 0xDE; break;
01483     case X86II::MRM_DF: MRM = 0xDF; break;
01484     case X86II::MRM_E0: MRM = 0xE0; break;
01485     case X86II::MRM_E1: MRM = 0xE1; break;
01486     case X86II::MRM_E2: MRM = 0xE2; break;
01487     case X86II::MRM_E3: MRM = 0xE3; break;
01488     case X86II::MRM_E4: MRM = 0xE4; break;
01489     case X86II::MRM_E5: MRM = 0xE5; break;
01490     case X86II::MRM_E8: MRM = 0xE8; break;
01491     case X86II::MRM_E9: MRM = 0xE9; break;
01492     case X86II::MRM_EA: MRM = 0xEA; break;
01493     case X86II::MRM_EB: MRM = 0xEB; break;
01494     case X86II::MRM_EC: MRM = 0xEC; break;
01495     case X86II::MRM_ED: MRM = 0xED; break;
01496     case X86II::MRM_EE: MRM = 0xEE; break;
01497     case X86II::MRM_F0: MRM = 0xF0; break;
01498     case X86II::MRM_F1: MRM = 0xF1; break;
01499     case X86II::MRM_F2: MRM = 0xF2; break;
01500     case X86II::MRM_F3: MRM = 0xF3; break;
01501     case X86II::MRM_F4: MRM = 0xF4; break;
01502     case X86II::MRM_F5: MRM = 0xF5; break;
01503     case X86II::MRM_F6: MRM = 0xF6; break;
01504     case X86II::MRM_F7: MRM = 0xF7; break;
01505     case X86II::MRM_F8: MRM = 0xF8; break;
01506     case X86II::MRM_F9: MRM = 0xF9; break;
01507     case X86II::MRM_FA: MRM = 0xFA; break;
01508     case X86II::MRM_FB: MRM = 0xFB; break;
01509     case X86II::MRM_FC: MRM = 0xFC; break;
01510     case X86II::MRM_FD: MRM = 0xFD; break;
01511     case X86II::MRM_FE: MRM = 0xFE; break;
01512     case X86II::MRM_FF: MRM = 0xFF; break;
01513     }
01514     EmitByte(MRM, CurByte, OS);
01515     break;
01516   }
01517 
01518   // If there is a remaining operand, it must be a trailing immediate.  Emit it
01519   // according to the right size for the instruction. Some instructions
01520   // (SSE4a extrq and insertq) have two trailing immediates.
01521   while (CurOp != NumOps && NumOps - CurOp <= 2) {
01522     // The last source register of a 4 operand instruction in AVX is encoded
01523     // in bits[7:4] of an immediate byte.
01524     if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
01525       const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
01526                                                     : CurOp);
01527       ++CurOp;
01528       unsigned RegNum = GetX86RegNum(MO) << 4;
01529       if (X86II::isX86_64ExtendedReg(MO.getReg()))
01530         RegNum |= 1 << 7;
01531       // If there is an additional 5th operand, it must be an immediate, which
01532       // is encoded in bits[3:0].
01533       if (CurOp != NumOps) {
01534         const MCOperand &MIMM = MI.getOperand(CurOp++);
01535         if (MIMM.isImm()) {
01536           unsigned Val = MIMM.getImm();
01537           assert(Val < 16 && "Immediate operand value out of range");
01538           RegNum |= Val;
01539         }
01540       }
01541       EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1,
01542                     CurByte, OS, Fixups);
01543     } else {
01544       EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
01545                     X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
01546                     CurByte, OS, Fixups);
01547     }
01548   }
01549 
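        // 3DNow! instructions (0F 0F escape) carry their real opcode as a
        // suffix byte that follows the operands, so emit it last.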
01550   if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
01551     EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
01552 
01553 #ifndef NDEBUG
01554   // FIXME: Verify.
01555   if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
01556     errs() << "Cannot encode all operands of: ";
01557     MI.dump();
01558     errs() << '\n';
01559     abort();
01560   }
01561 #endif
01562 }