X86InstrInfo.cpp

00001 //===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file contains the X86 implementation of the TargetInstrInfo class.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "X86InstrInfo.h"
00015 #include "X86.h"
00016 #include "X86InstrBuilder.h"
00017 #include "X86MachineFunctionInfo.h"
00018 #include "X86Subtarget.h"
00019 #include "X86TargetMachine.h"
00020 #include "llvm/ADT/STLExtras.h"
00021 #include "llvm/CodeGen/LiveVariables.h"
00022 #include "llvm/CodeGen/MachineConstantPool.h"
00023 #include "llvm/CodeGen/MachineDominators.h"
00024 #include "llvm/CodeGen/MachineFrameInfo.h"
00025 #include "llvm/CodeGen/MachineInstrBuilder.h"
00026 #include "llvm/CodeGen/MachineRegisterInfo.h"
00027 #include "llvm/CodeGen/StackMaps.h"
00028 #include "llvm/IR/DerivedTypes.h"
00029 #include "llvm/IR/Function.h"
00030 #include "llvm/IR/LLVMContext.h"
00031 #include "llvm/MC/MCAsmInfo.h"
00032 #include "llvm/MC/MCExpr.h"
00033 #include "llvm/MC/MCInst.h"
00034 #include "llvm/Support/CommandLine.h"
00035 #include "llvm/Support/Debug.h"
00036 #include "llvm/Support/ErrorHandling.h"
00037 #include "llvm/Support/raw_ostream.h"
00038 #include "llvm/Target/TargetOptions.h"
00039 #include <limits>
00040 
00041 using namespace llvm;
00042 
00043 #define DEBUG_TYPE "x86-instr-info"
00044 
00045 #define GET_INSTRINFO_CTOR_DTOR
00046 #include "X86GenInstrInfo.inc"
00047 
00048 static cl::opt<bool>
00049 NoFusing("disable-spill-fusing",
00050          cl::desc("Disable fusing of spill code into instructions"));
00051 static cl::opt<bool>
00052 PrintFailedFusing("print-failed-fuse-candidates",
00053                   cl::desc("Print instructions that the allocator wants to"
00054                            " fuse, but the X86 backend currently can't"),
00055                   cl::Hidden);
00056 static cl::opt<bool>
00057 ReMatPICStubLoad("remat-pic-stub-load",
00058                  cl::desc("Re-materialize load from stub in PIC mode"),
00059                  cl::init(false), cl::Hidden);
00060 
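// Usage sketch (illustrative, not from this file): since these are cl::opt
// flags, they are expected to be reachable from the llc command line, e.g.
// something like
//   llc -disable-spill-fusing foo.ll
// to turn off the folding of spill code into the instructions registered in
// the tables below.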
00061 enum {
00062   // Select which memory operand is being unfolded.
00063   // (stored in bits 0 - 3)
00064   TB_INDEX_0    = 0,
00065   TB_INDEX_1    = 1,
00066   TB_INDEX_2    = 2,
00067   TB_INDEX_3    = 3,
00068   TB_INDEX_MASK = 0xf,
00069 
00070   // Do not insert the reverse map (MemOp -> RegOp) into the table.
00071   // This may be needed because there is a many -> one mapping.
00072   TB_NO_REVERSE   = 1 << 4,
00073 
00074   // Do not insert the forward map (RegOp -> MemOp) into the table.
00075   // This is needed for Native Client, which prohibits branch
00076   // instructions from using a memory operand.
00077   TB_NO_FORWARD   = 1 << 5,
00078 
00079   TB_FOLDED_LOAD  = 1 << 6,
00080   TB_FOLDED_STORE = 1 << 7,
00081 
00082   // Minimum alignment required for load/store.
00083   // Used for RegOp->MemOp conversion.
00084   // (stored in bits 8 - 15)
00085   TB_ALIGN_SHIFT = 8,
00086   TB_ALIGN_NONE  =    0 << TB_ALIGN_SHIFT,
00087   TB_ALIGN_16    =   16 << TB_ALIGN_SHIFT,
00088   TB_ALIGN_32    =   32 << TB_ALIGN_SHIFT,
00089   TB_ALIGN_64    =   64 << TB_ALIGN_SHIFT,
00090   TB_ALIGN_MASK  = 0xff << TB_ALIGN_SHIFT
00091 };
00092 
00093 struct X86OpTblEntry {
00094   uint16_t RegOp;
00095   uint16_t MemOp;
00096   uint16_t Flags;
00097 };
00098 
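// Illustrative sketch of the Flags encoding above (hypothetical helper names,
// not code from this file): a table entry's 16-bit Flags word can be decoded
// with the masks defined in the enum, e.g.
//
//   static unsigned foldIndex(uint16_t Flags) {
//     return Flags & TB_INDEX_MASK;                     // which operand is folded
//   }
//   static unsigned foldMinAlign(uint16_t Flags) {
//     return (Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; // e.g. TB_ALIGN_16 -> 16 bytes
//   }
//   static bool foldsLoad(uint16_t Flags)  { return Flags & TB_FOLDED_LOAD; }
//   static bool foldsStore(uint16_t Flags) { return Flags & TB_FOLDED_STORE; }
//
// so an OpTbl2Addr entry registered with
// TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE decodes as "operand 0 is both
// loaded and stored, with no alignment requirement".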
00099 // Pin the vtable to this file.
00100 void X86InstrInfo::anchor() {}
00101 
00102 X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
00103     : X86GenInstrInfo(
00104           (STI.is64Bit() ? X86::ADJCALLSTACKDOWN64 : X86::ADJCALLSTACKDOWN32),
00105           (STI.is64Bit() ? X86::ADJCALLSTACKUP64 : X86::ADJCALLSTACKUP32)),
00106       Subtarget(STI), RI(STI) {
00107 
00108   static const X86OpTblEntry OpTbl2Addr[] = {
00109     { X86::ADC32ri,     X86::ADC32mi,    0 },
00110     { X86::ADC32ri8,    X86::ADC32mi8,   0 },
00111     { X86::ADC32rr,     X86::ADC32mr,    0 },
00112     { X86::ADC64ri32,   X86::ADC64mi32,  0 },
00113     { X86::ADC64ri8,    X86::ADC64mi8,   0 },
00114     { X86::ADC64rr,     X86::ADC64mr,    0 },
00115     { X86::ADD16ri,     X86::ADD16mi,    0 },
00116     { X86::ADD16ri8,    X86::ADD16mi8,   0 },
00117     { X86::ADD16ri_DB,  X86::ADD16mi,    TB_NO_REVERSE },
00118     { X86::ADD16ri8_DB, X86::ADD16mi8,   TB_NO_REVERSE },
00119     { X86::ADD16rr,     X86::ADD16mr,    0 },
00120     { X86::ADD16rr_DB,  X86::ADD16mr,    TB_NO_REVERSE },
00121     { X86::ADD32ri,     X86::ADD32mi,    0 },
00122     { X86::ADD32ri8,    X86::ADD32mi8,   0 },
00123     { X86::ADD32ri_DB,  X86::ADD32mi,    TB_NO_REVERSE },
00124     { X86::ADD32ri8_DB, X86::ADD32mi8,   TB_NO_REVERSE },
00125     { X86::ADD32rr,     X86::ADD32mr,    0 },
00126     { X86::ADD32rr_DB,  X86::ADD32mr,    TB_NO_REVERSE },
00127     { X86::ADD64ri32,   X86::ADD64mi32,  0 },
00128     { X86::ADD64ri8,    X86::ADD64mi8,   0 },
00129     { X86::ADD64ri32_DB,X86::ADD64mi32,  TB_NO_REVERSE },
00130     { X86::ADD64ri8_DB, X86::ADD64mi8,   TB_NO_REVERSE },
00131     { X86::ADD64rr,     X86::ADD64mr,    0 },
00132     { X86::ADD64rr_DB,  X86::ADD64mr,    TB_NO_REVERSE },
00133     { X86::ADD8ri,      X86::ADD8mi,     0 },
00134     { X86::ADD8rr,      X86::ADD8mr,     0 },
00135     { X86::AND16ri,     X86::AND16mi,    0 },
00136     { X86::AND16ri8,    X86::AND16mi8,   0 },
00137     { X86::AND16rr,     X86::AND16mr,    0 },
00138     { X86::AND32ri,     X86::AND32mi,    0 },
00139     { X86::AND32ri8,    X86::AND32mi8,   0 },
00140     { X86::AND32rr,     X86::AND32mr,    0 },
00141     { X86::AND64ri32,   X86::AND64mi32,  0 },
00142     { X86::AND64ri8,    X86::AND64mi8,   0 },
00143     { X86::AND64rr,     X86::AND64mr,    0 },
00144     { X86::AND8ri,      X86::AND8mi,     0 },
00145     { X86::AND8rr,      X86::AND8mr,     0 },
00146     { X86::DEC16r,      X86::DEC16m,     0 },
00147     { X86::DEC32r,      X86::DEC32m,     0 },
00148     { X86::DEC64_16r,   X86::DEC64_16m,  0 },
00149     { X86::DEC64_32r,   X86::DEC64_32m,  0 },
00150     { X86::DEC64r,      X86::DEC64m,     0 },
00151     { X86::DEC8r,       X86::DEC8m,      0 },
00152     { X86::INC16r,      X86::INC16m,     0 },
00153     { X86::INC32r,      X86::INC32m,     0 },
00154     { X86::INC64_16r,   X86::INC64_16m,  0 },
00155     { X86::INC64_32r,   X86::INC64_32m,  0 },
00156     { X86::INC64r,      X86::INC64m,     0 },
00157     { X86::INC8r,       X86::INC8m,      0 },
00158     { X86::NEG16r,      X86::NEG16m,     0 },
00159     { X86::NEG32r,      X86::NEG32m,     0 },
00160     { X86::NEG64r,      X86::NEG64m,     0 },
00161     { X86::NEG8r,       X86::NEG8m,      0 },
00162     { X86::NOT16r,      X86::NOT16m,     0 },
00163     { X86::NOT32r,      X86::NOT32m,     0 },
00164     { X86::NOT64r,      X86::NOT64m,     0 },
00165     { X86::NOT8r,       X86::NOT8m,      0 },
00166     { X86::OR16ri,      X86::OR16mi,     0 },
00167     { X86::OR16ri8,     X86::OR16mi8,    0 },
00168     { X86::OR16rr,      X86::OR16mr,     0 },
00169     { X86::OR32ri,      X86::OR32mi,     0 },
00170     { X86::OR32ri8,     X86::OR32mi8,    0 },
00171     { X86::OR32rr,      X86::OR32mr,     0 },
00172     { X86::OR64ri32,    X86::OR64mi32,   0 },
00173     { X86::OR64ri8,     X86::OR64mi8,    0 },
00174     { X86::OR64rr,      X86::OR64mr,     0 },
00175     { X86::OR8ri,       X86::OR8mi,      0 },
00176     { X86::OR8rr,       X86::OR8mr,      0 },
00177     { X86::ROL16r1,     X86::ROL16m1,    0 },
00178     { X86::ROL16rCL,    X86::ROL16mCL,   0 },
00179     { X86::ROL16ri,     X86::ROL16mi,    0 },
00180     { X86::ROL32r1,     X86::ROL32m1,    0 },
00181     { X86::ROL32rCL,    X86::ROL32mCL,   0 },
00182     { X86::ROL32ri,     X86::ROL32mi,    0 },
00183     { X86::ROL64r1,     X86::ROL64m1,    0 },
00184     { X86::ROL64rCL,    X86::ROL64mCL,   0 },
00185     { X86::ROL64ri,     X86::ROL64mi,    0 },
00186     { X86::ROL8r1,      X86::ROL8m1,     0 },
00187     { X86::ROL8rCL,     X86::ROL8mCL,    0 },
00188     { X86::ROL8ri,      X86::ROL8mi,     0 },
00189     { X86::ROR16r1,     X86::ROR16m1,    0 },
00190     { X86::ROR16rCL,    X86::ROR16mCL,   0 },
00191     { X86::ROR16ri,     X86::ROR16mi,    0 },
00192     { X86::ROR32r1,     X86::ROR32m1,    0 },
00193     { X86::ROR32rCL,    X86::ROR32mCL,   0 },
00194     { X86::ROR32ri,     X86::ROR32mi,    0 },
00195     { X86::ROR64r1,     X86::ROR64m1,    0 },
00196     { X86::ROR64rCL,    X86::ROR64mCL,   0 },
00197     { X86::ROR64ri,     X86::ROR64mi,    0 },
00198     { X86::ROR8r1,      X86::ROR8m1,     0 },
00199     { X86::ROR8rCL,     X86::ROR8mCL,    0 },
00200     { X86::ROR8ri,      X86::ROR8mi,     0 },
00201     { X86::SAR16r1,     X86::SAR16m1,    0 },
00202     { X86::SAR16rCL,    X86::SAR16mCL,   0 },
00203     { X86::SAR16ri,     X86::SAR16mi,    0 },
00204     { X86::SAR32r1,     X86::SAR32m1,    0 },
00205     { X86::SAR32rCL,    X86::SAR32mCL,   0 },
00206     { X86::SAR32ri,     X86::SAR32mi,    0 },
00207     { X86::SAR64r1,     X86::SAR64m1,    0 },
00208     { X86::SAR64rCL,    X86::SAR64mCL,   0 },
00209     { X86::SAR64ri,     X86::SAR64mi,    0 },
00210     { X86::SAR8r1,      X86::SAR8m1,     0 },
00211     { X86::SAR8rCL,     X86::SAR8mCL,    0 },
00212     { X86::SAR8ri,      X86::SAR8mi,     0 },
00213     { X86::SBB32ri,     X86::SBB32mi,    0 },
00214     { X86::SBB32ri8,    X86::SBB32mi8,   0 },
00215     { X86::SBB32rr,     X86::SBB32mr,    0 },
00216     { X86::SBB64ri32,   X86::SBB64mi32,  0 },
00217     { X86::SBB64ri8,    X86::SBB64mi8,   0 },
00218     { X86::SBB64rr,     X86::SBB64mr,    0 },
00219     { X86::SHL16rCL,    X86::SHL16mCL,   0 },
00220     { X86::SHL16ri,     X86::SHL16mi,    0 },
00221     { X86::SHL32rCL,    X86::SHL32mCL,   0 },
00222     { X86::SHL32ri,     X86::SHL32mi,    0 },
00223     { X86::SHL64rCL,    X86::SHL64mCL,   0 },
00224     { X86::SHL64ri,     X86::SHL64mi,    0 },
00225     { X86::SHL8rCL,     X86::SHL8mCL,    0 },
00226     { X86::SHL8ri,      X86::SHL8mi,     0 },
00227     { X86::SHLD16rrCL,  X86::SHLD16mrCL, 0 },
00228     { X86::SHLD16rri8,  X86::SHLD16mri8, 0 },
00229     { X86::SHLD32rrCL,  X86::SHLD32mrCL, 0 },
00230     { X86::SHLD32rri8,  X86::SHLD32mri8, 0 },
00231     { X86::SHLD64rrCL,  X86::SHLD64mrCL, 0 },
00232     { X86::SHLD64rri8,  X86::SHLD64mri8, 0 },
00233     { X86::SHR16r1,     X86::SHR16m1,    0 },
00234     { X86::SHR16rCL,    X86::SHR16mCL,   0 },
00235     { X86::SHR16ri,     X86::SHR16mi,    0 },
00236     { X86::SHR32r1,     X86::SHR32m1,    0 },
00237     { X86::SHR32rCL,    X86::SHR32mCL,   0 },
00238     { X86::SHR32ri,     X86::SHR32mi,    0 },
00239     { X86::SHR64r1,     X86::SHR64m1,    0 },
00240     { X86::SHR64rCL,    X86::SHR64mCL,   0 },
00241     { X86::SHR64ri,     X86::SHR64mi,    0 },
00242     { X86::SHR8r1,      X86::SHR8m1,     0 },
00243     { X86::SHR8rCL,     X86::SHR8mCL,    0 },
00244     { X86::SHR8ri,      X86::SHR8mi,     0 },
00245     { X86::SHRD16rrCL,  X86::SHRD16mrCL, 0 },
00246     { X86::SHRD16rri8,  X86::SHRD16mri8, 0 },
00247     { X86::SHRD32rrCL,  X86::SHRD32mrCL, 0 },
00248     { X86::SHRD32rri8,  X86::SHRD32mri8, 0 },
00249     { X86::SHRD64rrCL,  X86::SHRD64mrCL, 0 },
00250     { X86::SHRD64rri8,  X86::SHRD64mri8, 0 },
00251     { X86::SUB16ri,     X86::SUB16mi,    0 },
00252     { X86::SUB16ri8,    X86::SUB16mi8,   0 },
00253     { X86::SUB16rr,     X86::SUB16mr,    0 },
00254     { X86::SUB32ri,     X86::SUB32mi,    0 },
00255     { X86::SUB32ri8,    X86::SUB32mi8,   0 },
00256     { X86::SUB32rr,     X86::SUB32mr,    0 },
00257     { X86::SUB64ri32,   X86::SUB64mi32,  0 },
00258     { X86::SUB64ri8,    X86::SUB64mi8,   0 },
00259     { X86::SUB64rr,     X86::SUB64mr,    0 },
00260     { X86::SUB8ri,      X86::SUB8mi,     0 },
00261     { X86::SUB8rr,      X86::SUB8mr,     0 },
00262     { X86::XOR16ri,     X86::XOR16mi,    0 },
00263     { X86::XOR16ri8,    X86::XOR16mi8,   0 },
00264     { X86::XOR16rr,     X86::XOR16mr,    0 },
00265     { X86::XOR32ri,     X86::XOR32mi,    0 },
00266     { X86::XOR32ri8,    X86::XOR32mi8,   0 },
00267     { X86::XOR32rr,     X86::XOR32mr,    0 },
00268     { X86::XOR64ri32,   X86::XOR64mi32,  0 },
00269     { X86::XOR64ri8,    X86::XOR64mi8,   0 },
00270     { X86::XOR64rr,     X86::XOR64mr,    0 },
00271     { X86::XOR8ri,      X86::XOR8mi,     0 },
00272     { X86::XOR8rr,      X86::XOR8mr,     0 }
00273   };
00274 
00275   for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
00276     unsigned RegOp = OpTbl2Addr[i].RegOp;
00277     unsigned MemOp = OpTbl2Addr[i].MemOp;
00278     unsigned Flags = OpTbl2Addr[i].Flags;
00279     AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
00280                   RegOp, MemOp,
00281                   // Index 0, folded load and store, no alignment requirement.
00282                   Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
00283   }
00284 
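// Worked example (illustrative; register names chosen arbitrarily): with an
// OpTbl2Addr entry such as { X86::ADD32rr, X86::ADD32mr, 0 }, a two-address
// instruction
//   %vreg1 = ADD32rr %vreg1, %vreg2
// whose tied destination/source has been spilled to a stack slot can be
// rewritten as
//   ADD32mr <fi#N>, %vreg2
// which both loads and stores the memory operand, matching the
// TB_FOLDED_LOAD | TB_FOLDED_STORE flags added above.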
00285   static const X86OpTblEntry OpTbl0[] = {
00286     { X86::BT16ri8,     X86::BT16mi8,       TB_FOLDED_LOAD },
00287     { X86::BT32ri8,     X86::BT32mi8,       TB_FOLDED_LOAD },
00288     { X86::BT64ri8,     X86::BT64mi8,       TB_FOLDED_LOAD },
00289     { X86::CALL32r,     X86::CALL32m,       TB_FOLDED_LOAD },
00290     { X86::CALL64r,     X86::CALL64m,       TB_FOLDED_LOAD },
00291     { X86::CMP16ri,     X86::CMP16mi,       TB_FOLDED_LOAD },
00292     { X86::CMP16ri8,    X86::CMP16mi8,      TB_FOLDED_LOAD },
00293     { X86::CMP16rr,     X86::CMP16mr,       TB_FOLDED_LOAD },
00294     { X86::CMP32ri,     X86::CMP32mi,       TB_FOLDED_LOAD },
00295     { X86::CMP32ri8,    X86::CMP32mi8,      TB_FOLDED_LOAD },
00296     { X86::CMP32rr,     X86::CMP32mr,       TB_FOLDED_LOAD },
00297     { X86::CMP64ri32,   X86::CMP64mi32,     TB_FOLDED_LOAD },
00298     { X86::CMP64ri8,    X86::CMP64mi8,      TB_FOLDED_LOAD },
00299     { X86::CMP64rr,     X86::CMP64mr,       TB_FOLDED_LOAD },
00300     { X86::CMP8ri,      X86::CMP8mi,        TB_FOLDED_LOAD },
00301     { X86::CMP8rr,      X86::CMP8mr,        TB_FOLDED_LOAD },
00302     { X86::DIV16r,      X86::DIV16m,        TB_FOLDED_LOAD },
00303     { X86::DIV32r,      X86::DIV32m,        TB_FOLDED_LOAD },
00304     { X86::DIV64r,      X86::DIV64m,        TB_FOLDED_LOAD },
00305     { X86::DIV8r,       X86::DIV8m,         TB_FOLDED_LOAD },
00306     { X86::EXTRACTPSrr, X86::EXTRACTPSmr,   TB_FOLDED_STORE },
00307     { X86::IDIV16r,     X86::IDIV16m,       TB_FOLDED_LOAD },
00308     { X86::IDIV32r,     X86::IDIV32m,       TB_FOLDED_LOAD },
00309     { X86::IDIV64r,     X86::IDIV64m,       TB_FOLDED_LOAD },
00310     { X86::IDIV8r,      X86::IDIV8m,        TB_FOLDED_LOAD },
00311     { X86::IMUL16r,     X86::IMUL16m,       TB_FOLDED_LOAD },
00312     { X86::IMUL32r,     X86::IMUL32m,       TB_FOLDED_LOAD },
00313     { X86::IMUL64r,     X86::IMUL64m,       TB_FOLDED_LOAD },
00314     { X86::IMUL8r,      X86::IMUL8m,        TB_FOLDED_LOAD },
00315     { X86::JMP32r,      X86::JMP32m,        TB_FOLDED_LOAD },
00316     { X86::JMP64r,      X86::JMP64m,        TB_FOLDED_LOAD },
00317     { X86::MOV16ri,     X86::MOV16mi,       TB_FOLDED_STORE },
00318     { X86::MOV16rr,     X86::MOV16mr,       TB_FOLDED_STORE },
00319     { X86::MOV32ri,     X86::MOV32mi,       TB_FOLDED_STORE },
00320     { X86::MOV32rr,     X86::MOV32mr,       TB_FOLDED_STORE },
00321     { X86::MOV64ri32,   X86::MOV64mi32,     TB_FOLDED_STORE },
00322     { X86::MOV64rr,     X86::MOV64mr,       TB_FOLDED_STORE },
00323     { X86::MOV8ri,      X86::MOV8mi,        TB_FOLDED_STORE },
00324     { X86::MOV8rr,      X86::MOV8mr,        TB_FOLDED_STORE },
00325     { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, TB_FOLDED_STORE },
00326     { X86::MOVAPDrr,    X86::MOVAPDmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
00327     { X86::MOVAPSrr,    X86::MOVAPSmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
00328     { X86::MOVDQArr,    X86::MOVDQAmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
00329     { X86::MOVPDI2DIrr, X86::MOVPDI2DImr,   TB_FOLDED_STORE },
00330     { X86::MOVPQIto64rr,X86::MOVPQI2QImr,   TB_FOLDED_STORE },
00331     { X86::MOVSDto64rr, X86::MOVSDto64mr,   TB_FOLDED_STORE },
00332     { X86::MOVSS2DIrr,  X86::MOVSS2DImr,    TB_FOLDED_STORE },
00333     { X86::MOVUPDrr,    X86::MOVUPDmr,      TB_FOLDED_STORE },
00334     { X86::MOVUPSrr,    X86::MOVUPSmr,      TB_FOLDED_STORE },
00335     { X86::MUL16r,      X86::MUL16m,        TB_FOLDED_LOAD },
00336     { X86::MUL32r,      X86::MUL32m,        TB_FOLDED_LOAD },
00337     { X86::MUL64r,      X86::MUL64m,        TB_FOLDED_LOAD },
00338     { X86::MUL8r,       X86::MUL8m,         TB_FOLDED_LOAD },
00339     { X86::SETAEr,      X86::SETAEm,        TB_FOLDED_STORE },
00340     { X86::SETAr,       X86::SETAm,         TB_FOLDED_STORE },
00341     { X86::SETBEr,      X86::SETBEm,        TB_FOLDED_STORE },
00342     { X86::SETBr,       X86::SETBm,         TB_FOLDED_STORE },
00343     { X86::SETEr,       X86::SETEm,         TB_FOLDED_STORE },
00344     { X86::SETGEr,      X86::SETGEm,        TB_FOLDED_STORE },
00345     { X86::SETGr,       X86::SETGm,         TB_FOLDED_STORE },
00346     { X86::SETLEr,      X86::SETLEm,        TB_FOLDED_STORE },
00347     { X86::SETLr,       X86::SETLm,         TB_FOLDED_STORE },
00348     { X86::SETNEr,      X86::SETNEm,        TB_FOLDED_STORE },
00349     { X86::SETNOr,      X86::SETNOm,        TB_FOLDED_STORE },
00350     { X86::SETNPr,      X86::SETNPm,        TB_FOLDED_STORE },
00351     { X86::SETNSr,      X86::SETNSm,        TB_FOLDED_STORE },
00352     { X86::SETOr,       X86::SETOm,         TB_FOLDED_STORE },
00353     { X86::SETPr,       X86::SETPm,         TB_FOLDED_STORE },
00354     { X86::SETSr,       X86::SETSm,         TB_FOLDED_STORE },
00355     { X86::TAILJMPr,    X86::TAILJMPm,      TB_FOLDED_LOAD },
00356     { X86::TAILJMPr64,  X86::TAILJMPm64,    TB_FOLDED_LOAD },
00357     { X86::TEST16ri,    X86::TEST16mi,      TB_FOLDED_LOAD },
00358     { X86::TEST32ri,    X86::TEST32mi,      TB_FOLDED_LOAD },
00359     { X86::TEST64ri32,  X86::TEST64mi32,    TB_FOLDED_LOAD },
00360     { X86::TEST8ri,     X86::TEST8mi,       TB_FOLDED_LOAD },
00361     // AVX 128-bit versions of foldable instructions
00362     { X86::VEXTRACTPSrr,X86::VEXTRACTPSmr,  TB_FOLDED_STORE  },
00363     { X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
00364     { X86::VMOVAPDrr,   X86::VMOVAPDmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
00365     { X86::VMOVAPSrr,   X86::VMOVAPSmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
00366     { X86::VMOVDQArr,   X86::VMOVDQAmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
00367     { X86::VMOVPDI2DIrr,X86::VMOVPDI2DImr,  TB_FOLDED_STORE },
00368     { X86::VMOVPQIto64rr, X86::VMOVPQI2QImr,TB_FOLDED_STORE },
00369     { X86::VMOVSDto64rr,X86::VMOVSDto64mr,  TB_FOLDED_STORE },
00370     { X86::VMOVSS2DIrr, X86::VMOVSS2DImr,   TB_FOLDED_STORE },
00371     { X86::VMOVUPDrr,   X86::VMOVUPDmr,     TB_FOLDED_STORE },
00372     { X86::VMOVUPSrr,   X86::VMOVUPSmr,     TB_FOLDED_STORE },
00373     // AVX 256-bit foldable instructions
00374     { X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
00375     { X86::VMOVAPDYrr,  X86::VMOVAPDYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
00376     { X86::VMOVAPSYrr,  X86::VMOVAPSYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
00377     { X86::VMOVDQAYrr,  X86::VMOVDQAYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
00378     { X86::VMOVUPDYrr,  X86::VMOVUPDYmr,    TB_FOLDED_STORE },
00379     { X86::VMOVUPSYrr,  X86::VMOVUPSYmr,    TB_FOLDED_STORE },
00380     // AVX-512 foldable instructions
00381     { X86::VMOVPDI2DIZrr,   X86::VMOVPDI2DIZmr, TB_FOLDED_STORE },
00382     { X86::VMOVAPDZrr,      X86::VMOVAPDZmr,    TB_FOLDED_STORE | TB_ALIGN_64 },
00383     { X86::VMOVAPSZrr,      X86::VMOVAPSZmr,    TB_FOLDED_STORE | TB_ALIGN_64 },
00384     { X86::VMOVDQA32Zrr,    X86::VMOVDQA32Zmr,  TB_FOLDED_STORE | TB_ALIGN_64 },
00385     { X86::VMOVDQA64Zrr,    X86::VMOVDQA64Zmr,  TB_FOLDED_STORE | TB_ALIGN_64 },
00386     { X86::VMOVUPDZrr,      X86::VMOVUPDZmr,    TB_FOLDED_STORE },
00387     { X86::VMOVUPSZrr,      X86::VMOVUPSZmr,    TB_FOLDED_STORE },
00388     { X86::VMOVDQU32Zrr,    X86::VMOVDQU32Zmr,  TB_FOLDED_STORE },
00389     { X86::VMOVDQU64Zrr,    X86::VMOVDQU64Zmr,  TB_FOLDED_STORE }
00390   };
00391 
00392   for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
00393     unsigned RegOp      = OpTbl0[i].RegOp;
00394     unsigned MemOp      = OpTbl0[i].MemOp;
00395     unsigned Flags      = OpTbl0[i].Flags;
00396     AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
00397                   RegOp, MemOp, TB_INDEX_0 | Flags);
00398   }
00399 
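// Illustrative examples for OpTbl0 (not exhaustive): entries in this table
// fold the register operand at index 0, and the flag records which direction
// the resulting memory access goes. For instance,
//   { X86::MOV32rr, X86::MOV32mr, TB_FOLDED_STORE }   // reg copy -> store to memory
//   { X86::CALL64r, X86::CALL64m, TB_FOLDED_LOAD }    // indirect call through a loaded pointer
// let a spilled value be stored, or an indirect call target be read from
// memory, without first materializing it in a register.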
00400   static const X86OpTblEntry OpTbl1[] = {
00401     { X86::CMP16rr,         X86::CMP16rm,             0 },
00402     { X86::CMP32rr,         X86::CMP32rm,             0 },
00403     { X86::CMP64rr,         X86::CMP64rm,             0 },
00404     { X86::CMP8rr,          X86::CMP8rm,              0 },
00405     { X86::CVTSD2SSrr,      X86::CVTSD2SSrm,          0 },
00406     { X86::CVTSI2SD64rr,    X86::CVTSI2SD64rm,        0 },
00407     { X86::CVTSI2SDrr,      X86::CVTSI2SDrm,          0 },
00408     { X86::CVTSI2SS64rr,    X86::CVTSI2SS64rm,        0 },
00409     { X86::CVTSI2SSrr,      X86::CVTSI2SSrm,          0 },
00410     { X86::CVTSS2SDrr,      X86::CVTSS2SDrm,          0 },
00411     { X86::CVTTSD2SI64rr,   X86::CVTTSD2SI64rm,       0 },
00412     { X86::CVTTSD2SIrr,     X86::CVTTSD2SIrm,         0 },
00413     { X86::CVTTSS2SI64rr,   X86::CVTTSS2SI64rm,       0 },
00414     { X86::CVTTSS2SIrr,     X86::CVTTSS2SIrm,         0 },
00415     { X86::IMUL16rri,       X86::IMUL16rmi,           0 },
00416     { X86::IMUL16rri8,      X86::IMUL16rmi8,          0 },
00417     { X86::IMUL32rri,       X86::IMUL32rmi,           0 },
00418     { X86::IMUL32rri8,      X86::IMUL32rmi8,          0 },
00419     { X86::IMUL64rri32,     X86::IMUL64rmi32,         0 },
00420     { X86::IMUL64rri8,      X86::IMUL64rmi8,          0 },
00421     { X86::Int_COMISDrr,    X86::Int_COMISDrm,        0 },
00422     { X86::Int_COMISSrr,    X86::Int_COMISSrm,        0 },
00423     { X86::CVTSD2SI64rr,    X86::CVTSD2SI64rm,        0 },
00424     { X86::CVTSD2SIrr,      X86::CVTSD2SIrm,          0 },
00425     { X86::CVTSS2SI64rr,    X86::CVTSS2SI64rm,        0 },
00426     { X86::CVTSS2SIrr,      X86::CVTSS2SIrm,          0 },
00427     { X86::CVTTPD2DQrr,     X86::CVTTPD2DQrm,         TB_ALIGN_16 },
00428     { X86::CVTTPS2DQrr,     X86::CVTTPS2DQrm,         TB_ALIGN_16 },
00429     { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm,  0 },
00430     { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm,     0 },
00431     { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm,  0 },
00432     { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm,     0 },
00433     { X86::Int_UCOMISDrr,   X86::Int_UCOMISDrm,       0 },
00434     { X86::Int_UCOMISSrr,   X86::Int_UCOMISSrm,       0 },
00435     { X86::MOV16rr,         X86::MOV16rm,             0 },
00436     { X86::MOV32rr,         X86::MOV32rm,             0 },
00437     { X86::MOV64rr,         X86::MOV64rm,             0 },
00438     { X86::MOV64toPQIrr,    X86::MOVQI2PQIrm,         0 },
00439     { X86::MOV64toSDrr,     X86::MOV64toSDrm,         0 },
00440     { X86::MOV8rr,          X86::MOV8rm,              0 },
00441     { X86::MOVAPDrr,        X86::MOVAPDrm,            TB_ALIGN_16 },
00442     { X86::MOVAPSrr,        X86::MOVAPSrm,            TB_ALIGN_16 },
00443     { X86::MOVDDUPrr,       X86::MOVDDUPrm,           0 },
00444     { X86::MOVDI2PDIrr,     X86::MOVDI2PDIrm,         0 },
00445     { X86::MOVDI2SSrr,      X86::MOVDI2SSrm,          0 },
00446     { X86::MOVDQArr,        X86::MOVDQArm,            TB_ALIGN_16 },
00447     { X86::MOVSHDUPrr,      X86::MOVSHDUPrm,          TB_ALIGN_16 },
00448     { X86::MOVSLDUPrr,      X86::MOVSLDUPrm,          TB_ALIGN_16 },
00449     { X86::MOVSX16rr8,      X86::MOVSX16rm8,          0 },
00450     { X86::MOVSX32rr16,     X86::MOVSX32rm16,         0 },
00451     { X86::MOVSX32rr8,      X86::MOVSX32rm8,          0 },
00452     { X86::MOVSX64rr16,     X86::MOVSX64rm16,         0 },
00453     { X86::MOVSX64rr32,     X86::MOVSX64rm32,         0 },
00454     { X86::MOVSX64rr8,      X86::MOVSX64rm8,          0 },
00455     { X86::MOVUPDrr,        X86::MOVUPDrm,            TB_ALIGN_16 },
00456     { X86::MOVUPSrr,        X86::MOVUPSrm,            0 },
00457     { X86::MOVZQI2PQIrr,    X86::MOVZQI2PQIrm,        0 },
00458     { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm,     TB_ALIGN_16 },
00459     { X86::MOVZX16rr8,      X86::MOVZX16rm8,          0 },
00460     { X86::MOVZX32rr16,     X86::MOVZX32rm16,         0 },
00461     { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8,   0 },
00462     { X86::MOVZX32rr8,      X86::MOVZX32rm8,          0 },
00463     { X86::PABSBrr128,      X86::PABSBrm128,          TB_ALIGN_16 },
00464     { X86::PABSDrr128,      X86::PABSDrm128,          TB_ALIGN_16 },
00465     { X86::PABSWrr128,      X86::PABSWrm128,          TB_ALIGN_16 },
00466     { X86::PSHUFDri,        X86::PSHUFDmi,            TB_ALIGN_16 },
00467     { X86::PSHUFHWri,       X86::PSHUFHWmi,           TB_ALIGN_16 },
00468     { X86::PSHUFLWri,       X86::PSHUFLWmi,           TB_ALIGN_16 },
00469     { X86::RCPPSr,          X86::RCPPSm,              TB_ALIGN_16 },
00470     { X86::RCPPSr_Int,      X86::RCPPSm_Int,          TB_ALIGN_16 },
00471     { X86::RSQRTPSr,        X86::RSQRTPSm,            TB_ALIGN_16 },
00472     { X86::RSQRTPSr_Int,    X86::RSQRTPSm_Int,        TB_ALIGN_16 },
00473     { X86::RSQRTSSr,        X86::RSQRTSSm,            0 },
00474     { X86::RSQRTSSr_Int,    X86::RSQRTSSm_Int,        0 },
00475     { X86::SQRTPDr,         X86::SQRTPDm,             TB_ALIGN_16 },
00476     { X86::SQRTPSr,         X86::SQRTPSm,             TB_ALIGN_16 },
00477     { X86::SQRTSDr,         X86::SQRTSDm,             0 },
00478     { X86::SQRTSDr_Int,     X86::SQRTSDm_Int,         0 },
00479     { X86::SQRTSSr,         X86::SQRTSSm,             0 },
00480     { X86::SQRTSSr_Int,     X86::SQRTSSm_Int,         0 },
00481     { X86::TEST16rr,        X86::TEST16rm,            0 },
00482     { X86::TEST32rr,        X86::TEST32rm,            0 },
00483     { X86::TEST64rr,        X86::TEST64rm,            0 },
00484     { X86::TEST8rr,         X86::TEST8rm,             0 },
00485     // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
00486     { X86::UCOMISDrr,       X86::UCOMISDrm,           0 },
00487     { X86::UCOMISSrr,       X86::UCOMISSrm,           0 },
00488     // AVX 128-bit versions of foldable instructions
00489     { X86::Int_VCOMISDrr,   X86::Int_VCOMISDrm,       0 },
00490     { X86::Int_VCOMISSrr,   X86::Int_VCOMISSrm,       0 },
00491     { X86::Int_VUCOMISDrr,  X86::Int_VUCOMISDrm,      0 },
00492     { X86::Int_VUCOMISSrr,  X86::Int_VUCOMISSrm,      0 },
00493     { X86::VCVTTSD2SI64rr,  X86::VCVTTSD2SI64rm,      0 },
00494     { X86::Int_VCVTTSD2SI64rr,X86::Int_VCVTTSD2SI64rm,0 },
00495     { X86::VCVTTSD2SIrr,    X86::VCVTTSD2SIrm,        0 },
00496     { X86::Int_VCVTTSD2SIrr,X86::Int_VCVTTSD2SIrm,    0 },
00497     { X86::VCVTTSS2SI64rr,  X86::VCVTTSS2SI64rm,      0 },
00498     { X86::Int_VCVTTSS2SI64rr,X86::Int_VCVTTSS2SI64rm,0 },
00499     { X86::VCVTTSS2SIrr,    X86::VCVTTSS2SIrm,        0 },
00500     { X86::Int_VCVTTSS2SIrr,X86::Int_VCVTTSS2SIrm,    0 },
00501     { X86::VCVTSD2SI64rr,   X86::VCVTSD2SI64rm,       0 },
00502     { X86::VCVTSD2SIrr,     X86::VCVTSD2SIrm,         0 },
00503     { X86::VCVTSS2SI64rr,   X86::VCVTSS2SI64rm,       0 },
00504     { X86::VCVTSS2SIrr,     X86::VCVTSS2SIrm,         0 },
00505     { X86::VMOV64toPQIrr,   X86::VMOVQI2PQIrm,        0 },
00506     { X86::VMOV64toSDrr,    X86::VMOV64toSDrm,        0 },
00507     { X86::VMOVAPDrr,       X86::VMOVAPDrm,           TB_ALIGN_16 },
00508     { X86::VMOVAPSrr,       X86::VMOVAPSrm,           TB_ALIGN_16 },
00509     { X86::VMOVDDUPrr,      X86::VMOVDDUPrm,          0 },
00510     { X86::VMOVDI2PDIrr,    X86::VMOVDI2PDIrm,        0 },
00511     { X86::VMOVDI2SSrr,     X86::VMOVDI2SSrm,         0 },
00512     { X86::VMOVDQArr,       X86::VMOVDQArm,           TB_ALIGN_16 },
00513     { X86::VMOVSLDUPrr,     X86::VMOVSLDUPrm,         TB_ALIGN_16 },
00514     { X86::VMOVSHDUPrr,     X86::VMOVSHDUPrm,         TB_ALIGN_16 },
00515     { X86::VMOVUPDrr,       X86::VMOVUPDrm,           0 },
00516     { X86::VMOVUPSrr,       X86::VMOVUPSrm,           0 },
00517     { X86::VMOVZQI2PQIrr,   X86::VMOVZQI2PQIrm,       0 },
00518     { X86::VMOVZPQILo2PQIrr,X86::VMOVZPQILo2PQIrm,    TB_ALIGN_16 },
00519     { X86::VPABSBrr128,     X86::VPABSBrm128,         0 },
00520     { X86::VPABSDrr128,     X86::VPABSDrm128,         0 },
00521     { X86::VPABSWrr128,     X86::VPABSWrm128,         0 },
00522     { X86::VPERMILPDri,     X86::VPERMILPDmi,         0 },
00523     { X86::VPERMILPSri,     X86::VPERMILPSmi,         0 },
00524     { X86::VPSHUFDri,       X86::VPSHUFDmi,           0 },
00525     { X86::VPSHUFHWri,      X86::VPSHUFHWmi,          0 },
00526     { X86::VPSHUFLWri,      X86::VPSHUFLWmi,          0 },
00527     { X86::VRCPPSr,         X86::VRCPPSm,             0 },
00528     { X86::VRCPPSr_Int,     X86::VRCPPSm_Int,         0 },
00529     { X86::VRSQRTPSr,       X86::VRSQRTPSm,           0 },
00530     { X86::VRSQRTPSr_Int,   X86::VRSQRTPSm_Int,       0 },
00531     { X86::VSQRTPDr,        X86::VSQRTPDm,            0 },
00532     { X86::VSQRTPSr,        X86::VSQRTPSm,            0 },
00533     { X86::VUCOMISDrr,      X86::VUCOMISDrm,          0 },
00534     { X86::VUCOMISSrr,      X86::VUCOMISSrm,          0 },
00535     { X86::VBROADCASTSSrr,  X86::VBROADCASTSSrm,      TB_NO_REVERSE },
00536 
00537     // AVX 256-bit foldable instructions
00538     { X86::VMOVAPDYrr,      X86::VMOVAPDYrm,          TB_ALIGN_32 },
00539     { X86::VMOVAPSYrr,      X86::VMOVAPSYrm,          TB_ALIGN_32 },
00540     { X86::VMOVDQAYrr,      X86::VMOVDQAYrm,          TB_ALIGN_32 },
00541     { X86::VMOVUPDYrr,      X86::VMOVUPDYrm,          0 },
00542     { X86::VMOVUPSYrr,      X86::VMOVUPSYrm,          0 },
00543     { X86::VPERMILPDYri,    X86::VPERMILPDYmi,        0 },
00544     { X86::VPERMILPSYri,    X86::VPERMILPSYmi,        0 },
00545 
00546     // AVX2 foldable instructions
00547     { X86::VPABSBrr256,     X86::VPABSBrm256,         0 },
00548     { X86::VPABSDrr256,     X86::VPABSDrm256,         0 },
00549     { X86::VPABSWrr256,     X86::VPABSWrm256,         0 },
00550     { X86::VPSHUFDYri,      X86::VPSHUFDYmi,          0 },
00551     { X86::VPSHUFHWYri,     X86::VPSHUFHWYmi,         0 },
00552     { X86::VPSHUFLWYri,     X86::VPSHUFLWYmi,         0 },
00553     { X86::VRCPPSYr,        X86::VRCPPSYm,            0 },
00554     { X86::VRCPPSYr_Int,    X86::VRCPPSYm_Int,        0 },
00555     { X86::VRSQRTPSYr,      X86::VRSQRTPSYm,          0 },
00556     { X86::VSQRTPDYr,       X86::VSQRTPDYm,           0 },
00557     { X86::VSQRTPSYr,       X86::VSQRTPSYm,           0 },
00558     { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm,     TB_NO_REVERSE },
00559     { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm,     TB_NO_REVERSE },
00560 
00561     // BMI/BMI2/LZCNT/POPCNT/TBM foldable instructions
00562     { X86::BEXTR32rr,       X86::BEXTR32rm,           0 },
00563     { X86::BEXTR64rr,       X86::BEXTR64rm,           0 },
00564     { X86::BEXTRI32ri,      X86::BEXTRI32mi,          0 },
00565     { X86::BEXTRI64ri,      X86::BEXTRI64mi,          0 },
00566     { X86::BLCFILL32rr,     X86::BLCFILL32rm,         0 },
00567     { X86::BLCFILL64rr,     X86::BLCFILL64rm,         0 },
00568     { X86::BLCI32rr,        X86::BLCI32rm,            0 },
00569     { X86::BLCI64rr,        X86::BLCI64rm,            0 },
00570     { X86::BLCIC32rr,       X86::BLCIC32rm,           0 },
00571     { X86::BLCIC64rr,       X86::BLCIC64rm,           0 },
00572     { X86::BLCMSK32rr,      X86::BLCMSK32rm,          0 },
00573     { X86::BLCMSK64rr,      X86::BLCMSK64rm,          0 },
00574     { X86::BLCS32rr,        X86::BLCS32rm,            0 },
00575     { X86::BLCS64rr,        X86::BLCS64rm,            0 },
00576     { X86::BLSFILL32rr,     X86::BLSFILL32rm,         0 },
00577     { X86::BLSFILL64rr,     X86::BLSFILL64rm,         0 },
00578     { X86::BLSI32rr,        X86::BLSI32rm,            0 },
00579     { X86::BLSI64rr,        X86::BLSI64rm,            0 },
00580     { X86::BLSIC32rr,       X86::BLSIC32rm,           0 },
00581     { X86::BLSIC64rr,       X86::BLSIC64rm,           0 },
00582     { X86::BLSMSK32rr,      X86::BLSMSK32rm,          0 },
00583     { X86::BLSMSK64rr,      X86::BLSMSK64rm,          0 },
00584     { X86::BLSR32rr,        X86::BLSR32rm,            0 },
00585     { X86::BLSR64rr,        X86::BLSR64rm,            0 },
00586     { X86::BZHI32rr,        X86::BZHI32rm,            0 },
00587     { X86::BZHI64rr,        X86::BZHI64rm,            0 },
00588     { X86::LZCNT16rr,       X86::LZCNT16rm,           0 },
00589     { X86::LZCNT32rr,       X86::LZCNT32rm,           0 },
00590     { X86::LZCNT64rr,       X86::LZCNT64rm,           0 },
00591     { X86::POPCNT16rr,      X86::POPCNT16rm,          0 },
00592     { X86::POPCNT32rr,      X86::POPCNT32rm,          0 },
00593     { X86::POPCNT64rr,      X86::POPCNT64rm,          0 },
00594     { X86::RORX32ri,        X86::RORX32mi,            0 },
00595     { X86::RORX64ri,        X86::RORX64mi,            0 },
00596     { X86::SARX32rr,        X86::SARX32rm,            0 },
00597     { X86::SARX64rr,        X86::SARX64rm,            0 },
00598     { X86::SHRX32rr,        X86::SHRX32rm,            0 },
00599     { X86::SHRX64rr,        X86::SHRX64rm,            0 },
00600     { X86::SHLX32rr,        X86::SHLX32rm,            0 },
00601     { X86::SHLX64rr,        X86::SHLX64rm,            0 },
00602     { X86::T1MSKC32rr,      X86::T1MSKC32rm,          0 },
00603     { X86::T1MSKC64rr,      X86::T1MSKC64rm,          0 },
00604     { X86::TZCNT16rr,       X86::TZCNT16rm,           0 },
00605     { X86::TZCNT32rr,       X86::TZCNT32rm,           0 },
00606     { X86::TZCNT64rr,       X86::TZCNT64rm,           0 },
00607     { X86::TZMSK32rr,       X86::TZMSK32rm,           0 },
00608     { X86::TZMSK64rr,       X86::TZMSK64rm,           0 },
00609 
00610     // AVX-512 foldable instructions
00611     { X86::VMOV64toPQIZrr,  X86::VMOVQI2PQIZrm,       0 },
00612     { X86::VMOVDI2SSZrr,    X86::VMOVDI2SSZrm,        0 },
00613     { X86::VMOVAPDZrr,      X86::VMOVAPDZrm,          TB_ALIGN_64 },
00614     { X86::VMOVAPSZrr,      X86::VMOVAPSZrm,          TB_ALIGN_64 },
00615     { X86::VMOVDQA32Zrr,    X86::VMOVDQA32Zrm,        TB_ALIGN_64 },
00616     { X86::VMOVDQA64Zrr,    X86::VMOVDQA64Zrm,        TB_ALIGN_64 },
00617     { X86::VMOVDQU32Zrr,    X86::VMOVDQU32Zrm,        0 },
00618     { X86::VMOVDQU64Zrr,    X86::VMOVDQU64Zrm,        0 },
00619     { X86::VMOVUPDZrr,      X86::VMOVUPDZrm,          0 },
00620     { X86::VMOVUPSZrr,      X86::VMOVUPSZrm,          0 },
00621     { X86::VPABSDZrr,       X86::VPABSDZrm,           0 },
00622     { X86::VPABSQZrr,       X86::VPABSQZrm,           0 },
00623 
00624     // AES foldable instructions
00625     { X86::AESIMCrr,              X86::AESIMCrm,              TB_ALIGN_16 },
00626     { X86::AESKEYGENASSIST128rr,  X86::AESKEYGENASSIST128rm,  TB_ALIGN_16 },
00627     { X86::VAESIMCrr,             X86::VAESIMCrm,             TB_ALIGN_16 },
00628     { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, TB_ALIGN_16 }
00629   };
00630 
00631   for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
00632     unsigned RegOp = OpTbl1[i].RegOp;
00633     unsigned MemOp = OpTbl1[i].MemOp;
00634     unsigned Flags = OpTbl1[i].Flags;
00635     AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
00636                   RegOp, MemOp,
00637                   // Index 1, folded load
00638                   Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
00639   }
00640 
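// Illustrative example for OpTbl1 (hypothetical operands): these are unary
// instructions where operand 1 (the source) becomes a folded load, so
//   %vreg1 = MOVZX32rr8 %vreg2
// with %vreg2 reloaded from a spill slot can instead be emitted as
//   %vreg1 = MOVZX32rm8 <fi#N>
// i.e. TB_INDEX_1 | TB_FOLDED_LOAD as added in the loop above.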
00641   static const X86OpTblEntry OpTbl2[] = {
00642     { X86::ADC32rr,         X86::ADC32rm,       0 },
00643     { X86::ADC64rr,         X86::ADC64rm,       0 },
00644     { X86::ADD16rr,         X86::ADD16rm,       0 },
00645     { X86::ADD16rr_DB,      X86::ADD16rm,       TB_NO_REVERSE },
00646     { X86::ADD32rr,         X86::ADD32rm,       0 },
00647     { X86::ADD32rr_DB,      X86::ADD32rm,       TB_NO_REVERSE },
00648     { X86::ADD64rr,         X86::ADD64rm,       0 },
00649     { X86::ADD64rr_DB,      X86::ADD64rm,       TB_NO_REVERSE },
00650     { X86::ADD8rr,          X86::ADD8rm,        0 },
00651     { X86::ADDPDrr,         X86::ADDPDrm,       TB_ALIGN_16 },
00652     { X86::ADDPSrr,         X86::ADDPSrm,       TB_ALIGN_16 },
00653     { X86::ADDSDrr,         X86::ADDSDrm,       0 },
00654     { X86::ADDSSrr,         X86::ADDSSrm,       0 },
00655     { X86::ADDSUBPDrr,      X86::ADDSUBPDrm,    TB_ALIGN_16 },
00656     { X86::ADDSUBPSrr,      X86::ADDSUBPSrm,    TB_ALIGN_16 },
00657     { X86::AND16rr,         X86::AND16rm,       0 },
00658     { X86::AND32rr,         X86::AND32rm,       0 },
00659     { X86::AND64rr,         X86::AND64rm,       0 },
00660     { X86::AND8rr,          X86::AND8rm,        0 },
00661     { X86::ANDNPDrr,        X86::ANDNPDrm,      TB_ALIGN_16 },
00662     { X86::ANDNPSrr,        X86::ANDNPSrm,      TB_ALIGN_16 },
00663     { X86::ANDPDrr,         X86::ANDPDrm,       TB_ALIGN_16 },
00664     { X86::ANDPSrr,         X86::ANDPSrm,       TB_ALIGN_16 },
00665     { X86::BLENDPDrri,      X86::BLENDPDrmi,    TB_ALIGN_16 },
00666     { X86::BLENDPSrri,      X86::BLENDPSrmi,    TB_ALIGN_16 },
00667     { X86::BLENDVPDrr0,     X86::BLENDVPDrm0,   TB_ALIGN_16 },
00668     { X86::BLENDVPSrr0,     X86::BLENDVPSrm0,   TB_ALIGN_16 },
00669     { X86::CMOVA16rr,       X86::CMOVA16rm,     0 },
00670     { X86::CMOVA32rr,       X86::CMOVA32rm,     0 },
00671     { X86::CMOVA64rr,       X86::CMOVA64rm,     0 },
00672     { X86::CMOVAE16rr,      X86::CMOVAE16rm,    0 },
00673     { X86::CMOVAE32rr,      X86::CMOVAE32rm,    0 },
00674     { X86::CMOVAE64rr,      X86::CMOVAE64rm,    0 },
00675     { X86::CMOVB16rr,       X86::CMOVB16rm,     0 },
00676     { X86::CMOVB32rr,       X86::CMOVB32rm,     0 },
00677     { X86::CMOVB64rr,       X86::CMOVB64rm,     0 },
00678     { X86::CMOVBE16rr,      X86::CMOVBE16rm,    0 },
00679     { X86::CMOVBE32rr,      X86::CMOVBE32rm,    0 },
00680     { X86::CMOVBE64rr,      X86::CMOVBE64rm,    0 },
00681     { X86::CMOVE16rr,       X86::CMOVE16rm,     0 },
00682     { X86::CMOVE32rr,       X86::CMOVE32rm,     0 },
00683     { X86::CMOVE64rr,       X86::CMOVE64rm,     0 },
00684     { X86::CMOVG16rr,       X86::CMOVG16rm,     0 },
00685     { X86::CMOVG32rr,       X86::CMOVG32rm,     0 },
00686     { X86::CMOVG64rr,       X86::CMOVG64rm,     0 },
00687     { X86::CMOVGE16rr,      X86::CMOVGE16rm,    0 },
00688     { X86::CMOVGE32rr,      X86::CMOVGE32rm,    0 },
00689     { X86::CMOVGE64rr,      X86::CMOVGE64rm,    0 },
00690     { X86::CMOVL16rr,       X86::CMOVL16rm,     0 },
00691     { X86::CMOVL32rr,       X86::CMOVL32rm,     0 },
00692     { X86::CMOVL64rr,       X86::CMOVL64rm,     0 },
00693     { X86::CMOVLE16rr,      X86::CMOVLE16rm,    0 },
00694     { X86::CMOVLE32rr,      X86::CMOVLE32rm,    0 },
00695     { X86::CMOVLE64rr,      X86::CMOVLE64rm,    0 },
00696     { X86::CMOVNE16rr,      X86::CMOVNE16rm,    0 },
00697     { X86::CMOVNE32rr,      X86::CMOVNE32rm,    0 },
00698     { X86::CMOVNE64rr,      X86::CMOVNE64rm,    0 },
00699     { X86::CMOVNO16rr,      X86::CMOVNO16rm,    0 },
00700     { X86::CMOVNO32rr,      X86::CMOVNO32rm,    0 },
00701     { X86::CMOVNO64rr,      X86::CMOVNO64rm,    0 },
00702     { X86::CMOVNP16rr,      X86::CMOVNP16rm,    0 },
00703     { X86::CMOVNP32rr,      X86::CMOVNP32rm,    0 },
00704     { X86::CMOVNP64rr,      X86::CMOVNP64rm,    0 },
00705     { X86::CMOVNS16rr,      X86::CMOVNS16rm,    0 },
00706     { X86::CMOVNS32rr,      X86::CMOVNS32rm,    0 },
00707     { X86::CMOVNS64rr,      X86::CMOVNS64rm,    0 },
00708     { X86::CMOVO16rr,       X86::CMOVO16rm,     0 },
00709     { X86::CMOVO32rr,       X86::CMOVO32rm,     0 },
00710     { X86::CMOVO64rr,       X86::CMOVO64rm,     0 },
00711     { X86::CMOVP16rr,       X86::CMOVP16rm,     0 },
00712     { X86::CMOVP32rr,       X86::CMOVP32rm,     0 },
00713     { X86::CMOVP64rr,       X86::CMOVP64rm,     0 },
00714     { X86::CMOVS16rr,       X86::CMOVS16rm,     0 },
00715     { X86::CMOVS32rr,       X86::CMOVS32rm,     0 },
00716     { X86::CMOVS64rr,       X86::CMOVS64rm,     0 },
00717     { X86::CMPPDrri,        X86::CMPPDrmi,      TB_ALIGN_16 },
00718     { X86::CMPPSrri,        X86::CMPPSrmi,      TB_ALIGN_16 },
00719     { X86::CMPSDrr,         X86::CMPSDrm,       0 },
00720     { X86::CMPSSrr,         X86::CMPSSrm,       0 },
00721     { X86::DIVPDrr,         X86::DIVPDrm,       TB_ALIGN_16 },
00722     { X86::DIVPSrr,         X86::DIVPSrm,       TB_ALIGN_16 },
00723     { X86::DIVSDrr,         X86::DIVSDrm,       0 },
00724     { X86::DIVSSrr,         X86::DIVSSrm,       0 },
00725     { X86::FsANDNPDrr,      X86::FsANDNPDrm,    TB_ALIGN_16 },
00726     { X86::FsANDNPSrr,      X86::FsANDNPSrm,    TB_ALIGN_16 },
00727     { X86::FsANDPDrr,       X86::FsANDPDrm,     TB_ALIGN_16 },
00728     { X86::FsANDPSrr,       X86::FsANDPSrm,     TB_ALIGN_16 },
00729     { X86::FsORPDrr,        X86::FsORPDrm,      TB_ALIGN_16 },
00730     { X86::FsORPSrr,        X86::FsORPSrm,      TB_ALIGN_16 },
00731     { X86::FsXORPDrr,       X86::FsXORPDrm,     TB_ALIGN_16 },
00732     { X86::FsXORPSrr,       X86::FsXORPSrm,     TB_ALIGN_16 },
00733     { X86::HADDPDrr,        X86::HADDPDrm,      TB_ALIGN_16 },
00734     { X86::HADDPSrr,        X86::HADDPSrm,      TB_ALIGN_16 },
00735     { X86::HSUBPDrr,        X86::HSUBPDrm,      TB_ALIGN_16 },
00736     { X86::HSUBPSrr,        X86::HSUBPSrm,      TB_ALIGN_16 },
00737     { X86::IMUL16rr,        X86::IMUL16rm,      0 },
00738     { X86::IMUL32rr,        X86::IMUL32rm,      0 },
00739     { X86::IMUL64rr,        X86::IMUL64rm,      0 },
00740     { X86::Int_CMPSDrr,     X86::Int_CMPSDrm,   0 },
00741     { X86::Int_CMPSSrr,     X86::Int_CMPSSrm,   0 },
00742     { X86::Int_CVTSD2SSrr,  X86::Int_CVTSD2SSrm,      0 },
00743     { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm,    0 },
00744     { X86::Int_CVTSI2SDrr,  X86::Int_CVTSI2SDrm,      0 },
00745     { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm,    0 },
00746     { X86::Int_CVTSI2SSrr,  X86::Int_CVTSI2SSrm,      0 },
00747     { X86::Int_CVTSS2SDrr,  X86::Int_CVTSS2SDrm,      0 },
00748     { X86::MAXPDrr,         X86::MAXPDrm,       TB_ALIGN_16 },
00749     { X86::MAXPSrr,         X86::MAXPSrm,       TB_ALIGN_16 },
00750     { X86::MAXSDrr,         X86::MAXSDrm,       0 },
00751     { X86::MAXSSrr,         X86::MAXSSrm,       0 },
00752     { X86::MINPDrr,         X86::MINPDrm,       TB_ALIGN_16 },
00753     { X86::MINPSrr,         X86::MINPSrm,       TB_ALIGN_16 },
00754     { X86::MINSDrr,         X86::MINSDrm,       0 },
00755     { X86::MINSSrr,         X86::MINSSrm,       0 },
00756     { X86::MPSADBWrri,      X86::MPSADBWrmi,    TB_ALIGN_16 },
00757     { X86::MULPDrr,         X86::MULPDrm,       TB_ALIGN_16 },
00758     { X86::MULPSrr,         X86::MULPSrm,       TB_ALIGN_16 },
00759     { X86::MULSDrr,         X86::MULSDrm,       0 },
00760     { X86::MULSSrr,         X86::MULSSrm,       0 },
00761     { X86::OR16rr,          X86::OR16rm,        0 },
00762     { X86::OR32rr,          X86::OR32rm,        0 },
00763     { X86::OR64rr,          X86::OR64rm,        0 },
00764     { X86::OR8rr,           X86::OR8rm,         0 },
00765     { X86::ORPDrr,          X86::ORPDrm,        TB_ALIGN_16 },
00766     { X86::ORPSrr,          X86::ORPSrm,        TB_ALIGN_16 },
00767     { X86::PACKSSDWrr,      X86::PACKSSDWrm,    TB_ALIGN_16 },
00768     { X86::PACKSSWBrr,      X86::PACKSSWBrm,    TB_ALIGN_16 },
00769     { X86::PACKUSDWrr,      X86::PACKUSDWrm,    TB_ALIGN_16 },
00770     { X86::PACKUSWBrr,      X86::PACKUSWBrm,    TB_ALIGN_16 },
00771     { X86::PADDBrr,         X86::PADDBrm,       TB_ALIGN_16 },
00772     { X86::PADDDrr,         X86::PADDDrm,       TB_ALIGN_16 },
00773     { X86::PADDQrr,         X86::PADDQrm,       TB_ALIGN_16 },
00774     { X86::PADDSBrr,        X86::PADDSBrm,      TB_ALIGN_16 },
00775     { X86::PADDSWrr,        X86::PADDSWrm,      TB_ALIGN_16 },
00776     { X86::PADDUSBrr,       X86::PADDUSBrm,     TB_ALIGN_16 },
00777     { X86::PADDUSWrr,       X86::PADDUSWrm,     TB_ALIGN_16 },
00778     { X86::PADDWrr,         X86::PADDWrm,       TB_ALIGN_16 },
00779     { X86::PALIGNR128rr,    X86::PALIGNR128rm,  TB_ALIGN_16 },
00780     { X86::PANDNrr,         X86::PANDNrm,       TB_ALIGN_16 },
00781     { X86::PANDrr,          X86::PANDrm,        TB_ALIGN_16 },
00782     { X86::PAVGBrr,         X86::PAVGBrm,       TB_ALIGN_16 },
00783     { X86::PAVGWrr,         X86::PAVGWrm,       TB_ALIGN_16 },
00784     { X86::PBLENDWrri,      X86::PBLENDWrmi,    TB_ALIGN_16 },
00785     { X86::PCMPEQBrr,       X86::PCMPEQBrm,     TB_ALIGN_16 },
00786     { X86::PCMPEQDrr,       X86::PCMPEQDrm,     TB_ALIGN_16 },
00787     { X86::PCMPEQQrr,       X86::PCMPEQQrm,     TB_ALIGN_16 },
00788     { X86::PCMPEQWrr,       X86::PCMPEQWrm,     TB_ALIGN_16 },
00789     { X86::PCMPGTBrr,       X86::PCMPGTBrm,     TB_ALIGN_16 },
00790     { X86::PCMPGTDrr,       X86::PCMPGTDrm,     TB_ALIGN_16 },
00791     { X86::PCMPGTQrr,       X86::PCMPGTQrm,     TB_ALIGN_16 },
00792     { X86::PCMPGTWrr,       X86::PCMPGTWrm,     TB_ALIGN_16 },
00793     { X86::PHADDDrr,        X86::PHADDDrm,      TB_ALIGN_16 },
00794     { X86::PHADDWrr,        X86::PHADDWrm,      TB_ALIGN_16 },
00795     { X86::PHADDSWrr128,    X86::PHADDSWrm128,  TB_ALIGN_16 },
00796     { X86::PHSUBDrr,        X86::PHSUBDrm,      TB_ALIGN_16 },
00797     { X86::PHSUBSWrr128,    X86::PHSUBSWrm128,  TB_ALIGN_16 },
00798     { X86::PHSUBWrr,        X86::PHSUBWrm,      TB_ALIGN_16 },
00799     { X86::PINSRWrri,       X86::PINSRWrmi,     TB_ALIGN_16 },
00800     { X86::PMADDUBSWrr128,  X86::PMADDUBSWrm128, TB_ALIGN_16 },
00801     { X86::PMADDWDrr,       X86::PMADDWDrm,     TB_ALIGN_16 },
00802     { X86::PMAXSWrr,        X86::PMAXSWrm,      TB_ALIGN_16 },
00803     { X86::PMAXUBrr,        X86::PMAXUBrm,      TB_ALIGN_16 },
00804     { X86::PMINSWrr,        X86::PMINSWrm,      TB_ALIGN_16 },
00805     { X86::PMINUBrr,        X86::PMINUBrm,      TB_ALIGN_16 },
00806     { X86::PMINSBrr,        X86::PMINSBrm,      TB_ALIGN_16 },
00807     { X86::PMINSDrr,        X86::PMINSDrm,      TB_ALIGN_16 },
00808     { X86::PMINUDrr,        X86::PMINUDrm,      TB_ALIGN_16 },
00809     { X86::PMINUWrr,        X86::PMINUWrm,      TB_ALIGN_16 },
00810     { X86::PMAXSBrr,        X86::PMAXSBrm,      TB_ALIGN_16 },
00811     { X86::PMAXSDrr,        X86::PMAXSDrm,      TB_ALIGN_16 },
00812     { X86::PMAXUDrr,        X86::PMAXUDrm,      TB_ALIGN_16 },
00813     { X86::PMAXUWrr,        X86::PMAXUWrm,      TB_ALIGN_16 },
00814     { X86::PMULDQrr,        X86::PMULDQrm,      TB_ALIGN_16 },
00815     { X86::PMULHRSWrr128,   X86::PMULHRSWrm128, TB_ALIGN_16 },
00816     { X86::PMULHUWrr,       X86::PMULHUWrm,     TB_ALIGN_16 },
00817     { X86::PMULHWrr,        X86::PMULHWrm,      TB_ALIGN_16 },
00818     { X86::PMULLDrr,        X86::PMULLDrm,      TB_ALIGN_16 },
00819     { X86::PMULLWrr,        X86::PMULLWrm,      TB_ALIGN_16 },
00820     { X86::PMULUDQrr,       X86::PMULUDQrm,     TB_ALIGN_16 },
00821     { X86::PORrr,           X86::PORrm,         TB_ALIGN_16 },
00822     { X86::PSADBWrr,        X86::PSADBWrm,      TB_ALIGN_16 },
00823     { X86::PSHUFBrr,        X86::PSHUFBrm,      TB_ALIGN_16 },
00824     { X86::PSIGNBrr,        X86::PSIGNBrm,      TB_ALIGN_16 },
00825     { X86::PSIGNWrr,        X86::PSIGNWrm,      TB_ALIGN_16 },
00826     { X86::PSIGNDrr,        X86::PSIGNDrm,      TB_ALIGN_16 },
00827     { X86::PSLLDrr,         X86::PSLLDrm,       TB_ALIGN_16 },
00828     { X86::PSLLQrr,         X86::PSLLQrm,       TB_ALIGN_16 },
00829     { X86::PSLLWrr,         X86::PSLLWrm,       TB_ALIGN_16 },
00830     { X86::PSRADrr,         X86::PSRADrm,       TB_ALIGN_16 },
00831     { X86::PSRAWrr,         X86::PSRAWrm,       TB_ALIGN_16 },
00832     { X86::PSRLDrr,         X86::PSRLDrm,       TB_ALIGN_16 },
00833     { X86::PSRLQrr,         X86::PSRLQrm,       TB_ALIGN_16 },
00834     { X86::PSRLWrr,         X86::PSRLWrm,       TB_ALIGN_16 },
00835     { X86::PSUBBrr,         X86::PSUBBrm,       TB_ALIGN_16 },
00836     { X86::PSUBDrr,         X86::PSUBDrm,       TB_ALIGN_16 },
00837     { X86::PSUBSBrr,        X86::PSUBSBrm,      TB_ALIGN_16 },
00838     { X86::PSUBSWrr,        X86::PSUBSWrm,      TB_ALIGN_16 },
00839     { X86::PSUBWrr,         X86::PSUBWrm,       TB_ALIGN_16 },
00840     { X86::PUNPCKHBWrr,     X86::PUNPCKHBWrm,   TB_ALIGN_16 },
00841     { X86::PUNPCKHDQrr,     X86::PUNPCKHDQrm,   TB_ALIGN_16 },
00842     { X86::PUNPCKHQDQrr,    X86::PUNPCKHQDQrm,  TB_ALIGN_16 },
00843     { X86::PUNPCKHWDrr,     X86::PUNPCKHWDrm,   TB_ALIGN_16 },
00844     { X86::PUNPCKLBWrr,     X86::PUNPCKLBWrm,   TB_ALIGN_16 },
00845     { X86::PUNPCKLDQrr,     X86::PUNPCKLDQrm,   TB_ALIGN_16 },
00846     { X86::PUNPCKLQDQrr,    X86::PUNPCKLQDQrm,  TB_ALIGN_16 },
00847     { X86::PUNPCKLWDrr,     X86::PUNPCKLWDrm,   TB_ALIGN_16 },
00848     { X86::PXORrr,          X86::PXORrm,        TB_ALIGN_16 },
00849     { X86::SBB32rr,         X86::SBB32rm,       0 },
00850     { X86::SBB64rr,         X86::SBB64rm,       0 },
00851     { X86::SHUFPDrri,       X86::SHUFPDrmi,     TB_ALIGN_16 },
00852     { X86::SHUFPSrri,       X86::SHUFPSrmi,     TB_ALIGN_16 },
00853     { X86::SUB16rr,         X86::SUB16rm,       0 },
00854     { X86::SUB32rr,         X86::SUB32rm,       0 },
00855     { X86::SUB64rr,         X86::SUB64rm,       0 },
00856     { X86::SUB8rr,          X86::SUB8rm,        0 },
00857     { X86::SUBPDrr,         X86::SUBPDrm,       TB_ALIGN_16 },
00858     { X86::SUBPSrr,         X86::SUBPSrm,       TB_ALIGN_16 },
00859     { X86::SUBSDrr,         X86::SUBSDrm,       0 },
00860     { X86::SUBSSrr,         X86::SUBSSrm,       0 },
00861     // FIXME: TEST*rr -> swapped operand of TEST*mr.
00862     { X86::UNPCKHPDrr,      X86::UNPCKHPDrm,    TB_ALIGN_16 },
00863     { X86::UNPCKHPSrr,      X86::UNPCKHPSrm,    TB_ALIGN_16 },
00864     { X86::UNPCKLPDrr,      X86::UNPCKLPDrm,    TB_ALIGN_16 },
00865     { X86::UNPCKLPSrr,      X86::UNPCKLPSrm,    TB_ALIGN_16 },
00866     { X86::XOR16rr,         X86::XOR16rm,       0 },
00867     { X86::XOR32rr,         X86::XOR32rm,       0 },
00868     { X86::XOR64rr,         X86::XOR64rm,       0 },
00869     { X86::XOR8rr,          X86::XOR8rm,        0 },
00870     { X86::XORPDrr,         X86::XORPDrm,       TB_ALIGN_16 },
00871     { X86::XORPSrr,         X86::XORPSrm,       TB_ALIGN_16 },
00872     // AVX 128-bit versions of foldable instructions
00873     { X86::VCVTSD2SSrr,       X86::VCVTSD2SSrm,        0 },
00874     { X86::Int_VCVTSD2SSrr,   X86::Int_VCVTSD2SSrm,    0 },
00875     { X86::VCVTSI2SD64rr,     X86::VCVTSI2SD64rm,      0 },
00876     { X86::Int_VCVTSI2SD64rr, X86::Int_VCVTSI2SD64rm,  0 },
00877     { X86::VCVTSI2SDrr,       X86::VCVTSI2SDrm,        0 },
00878     { X86::Int_VCVTSI2SDrr,   X86::Int_VCVTSI2SDrm,    0 },
00879     { X86::VCVTSI2SS64rr,     X86::VCVTSI2SS64rm,      0 },
00880     { X86::Int_VCVTSI2SS64rr, X86::Int_VCVTSI2SS64rm,  0 },
00881     { X86::VCVTSI2SSrr,       X86::VCVTSI2SSrm,        0 },
00882     { X86::Int_VCVTSI2SSrr,   X86::Int_VCVTSI2SSrm,    0 },
00883     { X86::VCVTSS2SDrr,       X86::VCVTSS2SDrm,        0 },
00884     { X86::Int_VCVTSS2SDrr,   X86::Int_VCVTSS2SDrm,    0 },
00885     { X86::VCVTTPD2DQrr,      X86::VCVTTPD2DQXrm,      0 },
00886     { X86::VCVTTPS2DQrr,      X86::VCVTTPS2DQrm,       0 },
00887     { X86::VRSQRTSSr,         X86::VRSQRTSSm,          0 },
00888     { X86::VSQRTSDr,          X86::VSQRTSDm,           0 },
00889     { X86::VSQRTSSr,          X86::VSQRTSSm,           0 },
00890     { X86::VADDPDrr,          X86::VADDPDrm,           0 },
00891     { X86::VADDPSrr,          X86::VADDPSrm,           0 },
00892     { X86::VADDSDrr,          X86::VADDSDrm,           0 },
00893     { X86::VADDSSrr,          X86::VADDSSrm,           0 },
00894     { X86::VADDSUBPDrr,       X86::VADDSUBPDrm,        0 },
00895     { X86::VADDSUBPSrr,       X86::VADDSUBPSrm,        0 },
00896     { X86::VANDNPDrr,         X86::VANDNPDrm,          0 },
00897     { X86::VANDNPSrr,         X86::VANDNPSrm,          0 },
00898     { X86::VANDPDrr,          X86::VANDPDrm,           0 },
00899     { X86::VANDPSrr,          X86::VANDPSrm,           0 },
00900     { X86::VBLENDPDrri,       X86::VBLENDPDrmi,        0 },
00901     { X86::VBLENDPSrri,       X86::VBLENDPSrmi,        0 },
00902     { X86::VBLENDVPDrr,       X86::VBLENDVPDrm,        0 },
00903     { X86::VBLENDVPSrr,       X86::VBLENDVPSrm,        0 },
00904     { X86::VCMPPDrri,         X86::VCMPPDrmi,          0 },
00905     { X86::VCMPPSrri,         X86::VCMPPSrmi,          0 },
00906     { X86::VCMPSDrr,          X86::VCMPSDrm,           0 },
00907     { X86::VCMPSSrr,          X86::VCMPSSrm,           0 },
00908     { X86::VDIVPDrr,          X86::VDIVPDrm,           0 },
00909     { X86::VDIVPSrr,          X86::VDIVPSrm,           0 },
00910     { X86::VDIVSDrr,          X86::VDIVSDrm,           0 },
00911     { X86::VDIVSSrr,          X86::VDIVSSrm,           0 },
00912     { X86::VFsANDNPDrr,       X86::VFsANDNPDrm,        TB_ALIGN_16 },
00913     { X86::VFsANDNPSrr,       X86::VFsANDNPSrm,        TB_ALIGN_16 },
00914     { X86::VFsANDPDrr,        X86::VFsANDPDrm,         TB_ALIGN_16 },
00915     { X86::VFsANDPSrr,        X86::VFsANDPSrm,         TB_ALIGN_16 },
00916     { X86::VFsORPDrr,         X86::VFsORPDrm,          TB_ALIGN_16 },
00917     { X86::VFsORPSrr,         X86::VFsORPSrm,          TB_ALIGN_16 },
00918     { X86::VFsXORPDrr,        X86::VFsXORPDrm,         TB_ALIGN_16 },
00919     { X86::VFsXORPSrr,        X86::VFsXORPSrm,         TB_ALIGN_16 },
00920     { X86::VHADDPDrr,         X86::VHADDPDrm,          0 },
00921     { X86::VHADDPSrr,         X86::VHADDPSrm,          0 },
00922     { X86::VHSUBPDrr,         X86::VHSUBPDrm,          0 },
00923     { X86::VHSUBPSrr,         X86::VHSUBPSrm,          0 },
00924     { X86::Int_VCMPSDrr,      X86::Int_VCMPSDrm,       0 },
00925     { X86::Int_VCMPSSrr,      X86::Int_VCMPSSrm,       0 },
00926     { X86::VMAXPDrr,          X86::VMAXPDrm,           0 },
00927     { X86::VMAXPSrr,          X86::VMAXPSrm,           0 },
00928     { X86::VMAXSDrr,          X86::VMAXSDrm,           0 },
00929     { X86::VMAXSSrr,          X86::VMAXSSrm,           0 },
00930     { X86::VMINPDrr,          X86::VMINPDrm,           0 },
00931     { X86::VMINPSrr,          X86::VMINPSrm,           0 },
00932     { X86::VMINSDrr,          X86::VMINSDrm,           0 },
00933     { X86::VMINSSrr,          X86::VMINSSrm,           0 },
00934     { X86::VMPSADBWrri,       X86::VMPSADBWrmi,        0 },
00935     { X86::VMULPDrr,          X86::VMULPDrm,           0 },
00936     { X86::VMULPSrr,          X86::VMULPSrm,           0 },
00937     { X86::VMULSDrr,          X86::VMULSDrm,           0 },
00938     { X86::VMULSSrr,          X86::VMULSSrm,           0 },
00939     { X86::VORPDrr,           X86::VORPDrm,            0 },
00940     { X86::VORPSrr,           X86::VORPSrm,            0 },
00941     { X86::VPACKSSDWrr,       X86::VPACKSSDWrm,        0 },
00942     { X86::VPACKSSWBrr,       X86::VPACKSSWBrm,        0 },
00943     { X86::VPACKUSDWrr,       X86::VPACKUSDWrm,        0 },
00944     { X86::VPACKUSWBrr,       X86::VPACKUSWBrm,        0 },
00945     { X86::VPADDBrr,          X86::VPADDBrm,           0 },
00946     { X86::VPADDDrr,          X86::VPADDDrm,           0 },
00947     { X86::VPADDQrr,          X86::VPADDQrm,           0 },
00948     { X86::VPADDSBrr,         X86::VPADDSBrm,          0 },
00949     { X86::VPADDSWrr,         X86::VPADDSWrm,          0 },
00950     { X86::VPADDUSBrr,        X86::VPADDUSBrm,         0 },
00951     { X86::VPADDUSWrr,        X86::VPADDUSWrm,         0 },
00952     { X86::VPADDWrr,          X86::VPADDWrm,           0 },
00953     { X86::VPALIGNR128rr,     X86::VPALIGNR128rm,      0 },
00954     { X86::VPANDNrr,          X86::VPANDNrm,           0 },
00955     { X86::VPANDrr,           X86::VPANDrm,            0 },
00956     { X86::VPAVGBrr,          X86::VPAVGBrm,           0 },
00957     { X86::VPAVGWrr,          X86::VPAVGWrm,           0 },
00958     { X86::VPBLENDWrri,       X86::VPBLENDWrmi,        0 },
00959     { X86::VPCMPEQBrr,        X86::VPCMPEQBrm,         0 },
00960     { X86::VPCMPEQDrr,        X86::VPCMPEQDrm,         0 },
00961     { X86::VPCMPEQQrr,        X86::VPCMPEQQrm,         0 },
00962     { X86::VPCMPEQWrr,        X86::VPCMPEQWrm,         0 },
00963     { X86::VPCMPGTBrr,        X86::VPCMPGTBrm,         0 },
00964     { X86::VPCMPGTDrr,        X86::VPCMPGTDrm,         0 },
00965     { X86::VPCMPGTQrr,        X86::VPCMPGTQrm,         0 },
00966     { X86::VPCMPGTWrr,        X86::VPCMPGTWrm,         0 },
00967     { X86::VPHADDDrr,         X86::VPHADDDrm,          0 },
00968     { X86::VPHADDSWrr128,     X86::VPHADDSWrm128,      0 },
00969     { X86::VPHADDWrr,         X86::VPHADDWrm,          0 },
00970     { X86::VPHSUBDrr,         X86::VPHSUBDrm,          0 },
00971     { X86::VPHSUBSWrr128,     X86::VPHSUBSWrm128,      0 },
00972     { X86::VPHSUBWrr,         X86::VPHSUBWrm,          0 },
00973     { X86::VPERMILPDrr,       X86::VPERMILPDrm,        0 },
00974     { X86::VPERMILPSrr,       X86::VPERMILPSrm,        0 },
00975     { X86::VPINSRWrri,        X86::VPINSRWrmi,         0 },
00976     { X86::VPMADDUBSWrr128,   X86::VPMADDUBSWrm128,    0 },
00977     { X86::VPMADDWDrr,        X86::VPMADDWDrm,         0 },
00978     { X86::VPMAXSWrr,         X86::VPMAXSWrm,          0 },
00979     { X86::VPMAXUBrr,         X86::VPMAXUBrm,          0 },
00980     { X86::VPMINSWrr,         X86::VPMINSWrm,          0 },
00981     { X86::VPMINUBrr,         X86::VPMINUBrm,          0 },
00982     { X86::VPMINSBrr,         X86::VPMINSBrm,          0 },
00983     { X86::VPMINSDrr,         X86::VPMINSDrm,          0 },
00984     { X86::VPMINUDrr,         X86::VPMINUDrm,          0 },
00985     { X86::VPMINUWrr,         X86::VPMINUWrm,          0 },
00986     { X86::VPMAXSBrr,         X86::VPMAXSBrm,          0 },
00987     { X86::VPMAXSDrr,         X86::VPMAXSDrm,          0 },
00988     { X86::VPMAXUDrr,         X86::VPMAXUDrm,          0 },
00989     { X86::VPMAXUWrr,         X86::VPMAXUWrm,          0 },
00990     { X86::VPMULDQrr,         X86::VPMULDQrm,          0 },
00991     { X86::VPMULHRSWrr128,    X86::VPMULHRSWrm128,     0 },
00992     { X86::VPMULHUWrr,        X86::VPMULHUWrm,         0 },
00993     { X86::VPMULHWrr,         X86::VPMULHWrm,          0 },
00994     { X86::VPMULLDrr,         X86::VPMULLDrm,          0 },
00995     { X86::VPMULLWrr,         X86::VPMULLWrm,          0 },
00996     { X86::VPMULUDQrr,        X86::VPMULUDQrm,         0 },
00997     { X86::VPORrr,            X86::VPORrm,             0 },
00998     { X86::VPSADBWrr,         X86::VPSADBWrm,          0 },
00999     { X86::VPSHUFBrr,         X86::VPSHUFBrm,          0 },
01000     { X86::VPSIGNBrr,         X86::VPSIGNBrm,          0 },
01001     { X86::VPSIGNWrr,         X86::VPSIGNWrm,          0 },
01002     { X86::VPSIGNDrr,         X86::VPSIGNDrm,          0 },
01003     { X86::VPSLLDrr,          X86::VPSLLDrm,           0 },
01004     { X86::VPSLLQrr,          X86::VPSLLQrm,           0 },
01005     { X86::VPSLLWrr,          X86::VPSLLWrm,           0 },
01006     { X86::VPSRADrr,          X86::VPSRADrm,           0 },
01007     { X86::VPSRAWrr,          X86::VPSRAWrm,           0 },
01008     { X86::VPSRLDrr,          X86::VPSRLDrm,           0 },
01009     { X86::VPSRLQrr,          X86::VPSRLQrm,           0 },
01010     { X86::VPSRLWrr,          X86::VPSRLWrm,           0 },
01011     { X86::VPSUBBrr,          X86::VPSUBBrm,           0 },
01012     { X86::VPSUBDrr,          X86::VPSUBDrm,           0 },
01013     { X86::VPSUBSBrr,         X86::VPSUBSBrm,          0 },
01014     { X86::VPSUBSWrr,         X86::VPSUBSWrm,          0 },
01015     { X86::VPSUBWrr,          X86::VPSUBWrm,           0 },
01016     { X86::VPUNPCKHBWrr,      X86::VPUNPCKHBWrm,       0 },
01017     { X86::VPUNPCKHDQrr,      X86::VPUNPCKHDQrm,       0 },
01018     { X86::VPUNPCKHQDQrr,     X86::VPUNPCKHQDQrm,      0 },
01019     { X86::VPUNPCKHWDrr,      X86::VPUNPCKHWDrm,       0 },
01020     { X86::VPUNPCKLBWrr,      X86::VPUNPCKLBWrm,       0 },
01021     { X86::VPUNPCKLDQrr,      X86::VPUNPCKLDQrm,       0 },
01022     { X86::VPUNPCKLQDQrr,     X86::VPUNPCKLQDQrm,      0 },
01023     { X86::VPUNPCKLWDrr,      X86::VPUNPCKLWDrm,       0 },
01024     { X86::VPXORrr,           X86::VPXORrm,            0 },
01025     { X86::VSHUFPDrri,        X86::VSHUFPDrmi,         0 },
01026     { X86::VSHUFPSrri,        X86::VSHUFPSrmi,         0 },
01027     { X86::VSUBPDrr,          X86::VSUBPDrm,           0 },
01028     { X86::VSUBPSrr,          X86::VSUBPSrm,           0 },
01029     { X86::VSUBSDrr,          X86::VSUBSDrm,           0 },
01030     { X86::VSUBSSrr,          X86::VSUBSSrm,           0 },
01031     { X86::VUNPCKHPDrr,       X86::VUNPCKHPDrm,        0 },
01032     { X86::VUNPCKHPSrr,       X86::VUNPCKHPSrm,        0 },
01033     { X86::VUNPCKLPDrr,       X86::VUNPCKLPDrm,        0 },
01034     { X86::VUNPCKLPSrr,       X86::VUNPCKLPSrm,        0 },
01035     { X86::VXORPDrr,          X86::VXORPDrm,           0 },
01036     { X86::VXORPSrr,          X86::VXORPSrm,           0 },
01037     // AVX 256-bit foldable instructions
01038     { X86::VADDPDYrr,         X86::VADDPDYrm,          0 },
01039     { X86::VADDPSYrr,         X86::VADDPSYrm,          0 },
01040     { X86::VADDSUBPDYrr,      X86::VADDSUBPDYrm,       0 },
01041     { X86::VADDSUBPSYrr,      X86::VADDSUBPSYrm,       0 },
01042     { X86::VANDNPDYrr,        X86::VANDNPDYrm,         0 },
01043     { X86::VANDNPSYrr,        X86::VANDNPSYrm,         0 },
01044     { X86::VANDPDYrr,         X86::VANDPDYrm,          0 },
01045     { X86::VANDPSYrr,         X86::VANDPSYrm,          0 },
01046     { X86::VBLENDPDYrri,      X86::VBLENDPDYrmi,       0 },
01047     { X86::VBLENDPSYrri,      X86::VBLENDPSYrmi,       0 },
01048     { X86::VBLENDVPDYrr,      X86::VBLENDVPDYrm,       0 },
01049     { X86::VBLENDVPSYrr,      X86::VBLENDVPSYrm,       0 },
01050     { X86::VCMPPDYrri,        X86::VCMPPDYrmi,         0 },
01051     { X86::VCMPPSYrri,        X86::VCMPPSYrmi,         0 },
01052     { X86::VDIVPDYrr,         X86::VDIVPDYrm,          0 },
01053     { X86::VDIVPSYrr,         X86::VDIVPSYrm,          0 },
01054     { X86::VHADDPDYrr,        X86::VHADDPDYrm,         0 },
01055     { X86::VHADDPSYrr,        X86::VHADDPSYrm,         0 },
01056     { X86::VHSUBPDYrr,        X86::VHSUBPDYrm,         0 },
01057     { X86::VHSUBPSYrr,        X86::VHSUBPSYrm,         0 },
01058     { X86::VINSERTF128rr,     X86::VINSERTF128rm,      0 },
01059     { X86::VMAXPDYrr,         X86::VMAXPDYrm,          0 },
01060     { X86::VMAXPSYrr,         X86::VMAXPSYrm,          0 },
01061     { X86::VMINPDYrr,         X86::VMINPDYrm,          0 },
01062     { X86::VMINPSYrr,         X86::VMINPSYrm,          0 },
01063     { X86::VMULPDYrr,         X86::VMULPDYrm,          0 },
01064     { X86::VMULPSYrr,         X86::VMULPSYrm,          0 },
01065     { X86::VORPDYrr,          X86::VORPDYrm,           0 },
01066     { X86::VORPSYrr,          X86::VORPSYrm,           0 },
01067     { X86::VPERM2F128rr,      X86::VPERM2F128rm,       0 },
01068     { X86::VPERMILPDYrr,      X86::VPERMILPDYrm,       0 },
01069     { X86::VPERMILPSYrr,      X86::VPERMILPSYrm,       0 },
01070     { X86::VSHUFPDYrri,       X86::VSHUFPDYrmi,        0 },
01071     { X86::VSHUFPSYrri,       X86::VSHUFPSYrmi,        0 },
01072     { X86::VSUBPDYrr,         X86::VSUBPDYrm,          0 },
01073     { X86::VSUBPSYrr,         X86::VSUBPSYrm,          0 },
01074     { X86::VUNPCKHPDYrr,      X86::VUNPCKHPDYrm,       0 },
01075     { X86::VUNPCKHPSYrr,      X86::VUNPCKHPSYrm,       0 },
01076     { X86::VUNPCKLPDYrr,      X86::VUNPCKLPDYrm,       0 },
01077     { X86::VUNPCKLPSYrr,      X86::VUNPCKLPSYrm,       0 },
01078     { X86::VXORPDYrr,         X86::VXORPDYrm,          0 },
01079     { X86::VXORPSYrr,         X86::VXORPSYrm,          0 },
01080     // AVX2 foldable instructions
01081     { X86::VINSERTI128rr,     X86::VINSERTI128rm,      0 },
01082     { X86::VPACKSSDWYrr,      X86::VPACKSSDWYrm,       0 },
01083     { X86::VPACKSSWBYrr,      X86::VPACKSSWBYrm,       0 },
01084     { X86::VPACKUSDWYrr,      X86::VPACKUSDWYrm,       0 },
01085     { X86::VPACKUSWBYrr,      X86::VPACKUSWBYrm,       0 },
01086     { X86::VPADDBYrr,         X86::VPADDBYrm,          0 },
01087     { X86::VPADDDYrr,         X86::VPADDDYrm,          0 },
01088     { X86::VPADDQYrr,         X86::VPADDQYrm,          0 },
01089     { X86::VPADDSBYrr,        X86::VPADDSBYrm,         0 },
01090     { X86::VPADDSWYrr,        X86::VPADDSWYrm,         0 },
01091     { X86::VPADDUSBYrr,       X86::VPADDUSBYrm,        0 },
01092     { X86::VPADDUSWYrr,       X86::VPADDUSWYrm,        0 },
01093     { X86::VPADDWYrr,         X86::VPADDWYrm,          0 },
01094     { X86::VPALIGNR256rr,     X86::VPALIGNR256rm,      0 },
01095     { X86::VPANDNYrr,         X86::VPANDNYrm,          0 },
01096     { X86::VPANDYrr,          X86::VPANDYrm,           0 },
01097     { X86::VPAVGBYrr,         X86::VPAVGBYrm,          0 },
01098     { X86::VPAVGWYrr,         X86::VPAVGWYrm,          0 },
01099     { X86::VPBLENDDrri,       X86::VPBLENDDrmi,        0 },
01100     { X86::VPBLENDDYrri,      X86::VPBLENDDYrmi,       0 },
01101     { X86::VPBLENDWYrri,      X86::VPBLENDWYrmi,       0 },
01102     { X86::VPCMPEQBYrr,       X86::VPCMPEQBYrm,        0 },
01103     { X86::VPCMPEQDYrr,       X86::VPCMPEQDYrm,        0 },
01104     { X86::VPCMPEQQYrr,       X86::VPCMPEQQYrm,        0 },
01105     { X86::VPCMPEQWYrr,       X86::VPCMPEQWYrm,        0 },
01106     { X86::VPCMPGTBYrr,       X86::VPCMPGTBYrm,        0 },
01107     { X86::VPCMPGTDYrr,       X86::VPCMPGTDYrm,        0 },
01108     { X86::VPCMPGTQYrr,       X86::VPCMPGTQYrm,        0 },
01109     { X86::VPCMPGTWYrr,       X86::VPCMPGTWYrm,        0 },
01110     { X86::VPERM2I128rr,      X86::VPERM2I128rm,       0 },
01111     { X86::VPERMDYrr,         X86::VPERMDYrm,          0 },
01112     { X86::VPERMPDYri,        X86::VPERMPDYmi,         0 },
01113     { X86::VPERMPSYrr,        X86::VPERMPSYrm,         0 },
01114     { X86::VPERMQYri,         X86::VPERMQYmi,          0 },
01115     { X86::VPHADDDYrr,        X86::VPHADDDYrm,         0 },
01116     { X86::VPHADDSWrr256,     X86::VPHADDSWrm256,      0 },
01117     { X86::VPHADDWYrr,        X86::VPHADDWYrm,         0 },
01118     { X86::VPHSUBDYrr,        X86::VPHSUBDYrm,         0 },
01119     { X86::VPHSUBSWrr256,     X86::VPHSUBSWrm256,      0 },
01120     { X86::VPHSUBWYrr,        X86::VPHSUBWYrm,         0 },
01121     { X86::VPMADDUBSWrr256,   X86::VPMADDUBSWrm256,    0 },
01122     { X86::VPMADDWDYrr,       X86::VPMADDWDYrm,        0 },
01123     { X86::VPMAXSWYrr,        X86::VPMAXSWYrm,         0 },
01124     { X86::VPMAXUBYrr,        X86::VPMAXUBYrm,         0 },
01125     { X86::VPMINSWYrr,        X86::VPMINSWYrm,         0 },
01126     { X86::VPMINUBYrr,        X86::VPMINUBYrm,         0 },
01127     { X86::VPMINSBYrr,        X86::VPMINSBYrm,         0 },
01128     { X86::VPMINSDYrr,        X86::VPMINSDYrm,         0 },
01129     { X86::VPMINUDYrr,        X86::VPMINUDYrm,         0 },
01130     { X86::VPMINUWYrr,        X86::VPMINUWYrm,         0 },
01131     { X86::VPMAXSBYrr,        X86::VPMAXSBYrm,         0 },
01132     { X86::VPMAXSDYrr,        X86::VPMAXSDYrm,         0 },
01133     { X86::VPMAXUDYrr,        X86::VPMAXUDYrm,         0 },
01134     { X86::VPMAXUWYrr,        X86::VPMAXUWYrm,         0 },
01135     { X86::VMPSADBWYrri,      X86::VMPSADBWYrmi,       0 },
01136     { X86::VPMULDQYrr,        X86::VPMULDQYrm,         0 },
01137     { X86::VPMULHRSWrr256,    X86::VPMULHRSWrm256,     0 },
01138     { X86::VPMULHUWYrr,       X86::VPMULHUWYrm,        0 },
01139     { X86::VPMULHWYrr,        X86::VPMULHWYrm,         0 },
01140     { X86::VPMULLDYrr,        X86::VPMULLDYrm,         0 },
01141     { X86::VPMULLWYrr,        X86::VPMULLWYrm,         0 },
01142     { X86::VPMULUDQYrr,       X86::VPMULUDQYrm,        0 },
01143     { X86::VPORYrr,           X86::VPORYrm,            0 },
01144     { X86::VPSADBWYrr,        X86::VPSADBWYrm,         0 },
01145     { X86::VPSHUFBYrr,        X86::VPSHUFBYrm,         0 },
01146     { X86::VPSIGNBYrr,        X86::VPSIGNBYrm,         0 },
01147     { X86::VPSIGNWYrr,        X86::VPSIGNWYrm,         0 },
01148     { X86::VPSIGNDYrr,        X86::VPSIGNDYrm,         0 },
01149     { X86::VPSLLDYrr,         X86::VPSLLDYrm,          0 },
01150     { X86::VPSLLQYrr,         X86::VPSLLQYrm,          0 },
01151     { X86::VPSLLWYrr,         X86::VPSLLWYrm,          0 },
01152     { X86::VPSLLVDrr,         X86::VPSLLVDrm,          0 },
01153     { X86::VPSLLVDYrr,        X86::VPSLLVDYrm,         0 },
01154     { X86::VPSLLVQrr,         X86::VPSLLVQrm,          0 },
01155     { X86::VPSLLVQYrr,        X86::VPSLLVQYrm,         0 },
01156     { X86::VPSRADYrr,         X86::VPSRADYrm,          0 },
01157     { X86::VPSRAWYrr,         X86::VPSRAWYrm,          0 },
01158     { X86::VPSRAVDrr,         X86::VPSRAVDrm,          0 },
01159     { X86::VPSRAVDYrr,        X86::VPSRAVDYrm,         0 },
01160     { X86::VPSRLDYrr,         X86::VPSRLDYrm,          0 },
01161     { X86::VPSRLQYrr,         X86::VPSRLQYrm,          0 },
01162     { X86::VPSRLWYrr,         X86::VPSRLWYrm,          0 },
01163     { X86::VPSRLVDrr,         X86::VPSRLVDrm,          0 },
01164     { X86::VPSRLVDYrr,        X86::VPSRLVDYrm,         0 },
01165     { X86::VPSRLVQrr,         X86::VPSRLVQrm,          0 },
01166     { X86::VPSRLVQYrr,        X86::VPSRLVQYrm,         0 },
01167     { X86::VPSUBBYrr,         X86::VPSUBBYrm,          0 },
01168     { X86::VPSUBDYrr,         X86::VPSUBDYrm,          0 },
01169     { X86::VPSUBSBYrr,        X86::VPSUBSBYrm,         0 },
01170     { X86::VPSUBSWYrr,        X86::VPSUBSWYrm,         0 },
01171     { X86::VPSUBWYrr,         X86::VPSUBWYrm,          0 },
01172     { X86::VPUNPCKHBWYrr,     X86::VPUNPCKHBWYrm,      0 },
01173     { X86::VPUNPCKHDQYrr,     X86::VPUNPCKHDQYrm,      0 },
01174     { X86::VPUNPCKHQDQYrr,    X86::VPUNPCKHQDQYrm,     0 },
01175     { X86::VPUNPCKHWDYrr,     X86::VPUNPCKHWDYrm,      0 },
01176     { X86::VPUNPCKLBWYrr,     X86::VPUNPCKLBWYrm,      0 },
01177     { X86::VPUNPCKLDQYrr,     X86::VPUNPCKLDQYrm,      0 },
01178     { X86::VPUNPCKLQDQYrr,    X86::VPUNPCKLQDQYrm,     0 },
01179     { X86::VPUNPCKLWDYrr,     X86::VPUNPCKLWDYrm,      0 },
01180     { X86::VPXORYrr,          X86::VPXORYrm,           0 },
01181     // FIXME: add AVX 256-bit foldable instructions
01182 
01183     // FMA4 foldable patterns
01184     { X86::VFMADDSS4rr,       X86::VFMADDSS4mr,        0           },
01185     { X86::VFMADDSD4rr,       X86::VFMADDSD4mr,        0           },
01186     { X86::VFMADDPS4rr,       X86::VFMADDPS4mr,        TB_ALIGN_16 },
01187     { X86::VFMADDPD4rr,       X86::VFMADDPD4mr,        TB_ALIGN_16 },
01188     { X86::VFMADDPS4rrY,      X86::VFMADDPS4mrY,       TB_ALIGN_32 },
01189     { X86::VFMADDPD4rrY,      X86::VFMADDPD4mrY,       TB_ALIGN_32 },
01190     { X86::VFNMADDSS4rr,      X86::VFNMADDSS4mr,       0           },
01191     { X86::VFNMADDSD4rr,      X86::VFNMADDSD4mr,       0           },
01192     { X86::VFNMADDPS4rr,      X86::VFNMADDPS4mr,       TB_ALIGN_16 },
01193     { X86::VFNMADDPD4rr,      X86::VFNMADDPD4mr,       TB_ALIGN_16 },
01194     { X86::VFNMADDPS4rrY,     X86::VFNMADDPS4mrY,      TB_ALIGN_32 },
01195     { X86::VFNMADDPD4rrY,     X86::VFNMADDPD4mrY,      TB_ALIGN_32 },
01196     { X86::VFMSUBSS4rr,       X86::VFMSUBSS4mr,        0           },
01197     { X86::VFMSUBSD4rr,       X86::VFMSUBSD4mr,        0           },
01198     { X86::VFMSUBPS4rr,       X86::VFMSUBPS4mr,        TB_ALIGN_16 },
01199     { X86::VFMSUBPD4rr,       X86::VFMSUBPD4mr,        TB_ALIGN_16 },
01200     { X86::VFMSUBPS4rrY,      X86::VFMSUBPS4mrY,       TB_ALIGN_32 },
01201     { X86::VFMSUBPD4rrY,      X86::VFMSUBPD4mrY,       TB_ALIGN_32 },
01202     { X86::VFNMSUBSS4rr,      X86::VFNMSUBSS4mr,       0           },
01203     { X86::VFNMSUBSD4rr,      X86::VFNMSUBSD4mr,       0           },
01204     { X86::VFNMSUBPS4rr,      X86::VFNMSUBPS4mr,       TB_ALIGN_16 },
01205     { X86::VFNMSUBPD4rr,      X86::VFNMSUBPD4mr,       TB_ALIGN_16 },
01206     { X86::VFNMSUBPS4rrY,     X86::VFNMSUBPS4mrY,      TB_ALIGN_32 },
01207     { X86::VFNMSUBPD4rrY,     X86::VFNMSUBPD4mrY,      TB_ALIGN_32 },
01208     { X86::VFMADDSUBPS4rr,    X86::VFMADDSUBPS4mr,     TB_ALIGN_16 },
01209     { X86::VFMADDSUBPD4rr,    X86::VFMADDSUBPD4mr,     TB_ALIGN_16 },
01210     { X86::VFMADDSUBPS4rrY,   X86::VFMADDSUBPS4mrY,    TB_ALIGN_32 },
01211     { X86::VFMADDSUBPD4rrY,   X86::VFMADDSUBPD4mrY,    TB_ALIGN_32 },
01212     { X86::VFMSUBADDPS4rr,    X86::VFMSUBADDPS4mr,     TB_ALIGN_16 },
01213     { X86::VFMSUBADDPD4rr,    X86::VFMSUBADDPD4mr,     TB_ALIGN_16 },
01214     { X86::VFMSUBADDPS4rrY,   X86::VFMSUBADDPS4mrY,    TB_ALIGN_32 },
01215     { X86::VFMSUBADDPD4rrY,   X86::VFMSUBADDPD4mrY,    TB_ALIGN_32 },
01216 
01217     // BMI/BMI2 foldable instructions
01218     { X86::ANDN32rr,          X86::ANDN32rm,            0 },
01219     { X86::ANDN64rr,          X86::ANDN64rm,            0 },
01220     { X86::MULX32rr,          X86::MULX32rm,            0 },
01221     { X86::MULX64rr,          X86::MULX64rm,            0 },
01222     { X86::PDEP32rr,          X86::PDEP32rm,            0 },
01223     { X86::PDEP64rr,          X86::PDEP64rm,            0 },
01224     { X86::PEXT32rr,          X86::PEXT32rm,            0 },
01225     { X86::PEXT64rr,          X86::PEXT64rm,            0 },
01226 
01227     // AVX-512 foldable instructions
01228     { X86::VADDPSZrr,         X86::VADDPSZrm,           0 },
01229     { X86::VADDPDZrr,         X86::VADDPDZrm,           0 },
01230     { X86::VSUBPSZrr,         X86::VSUBPSZrm,           0 },
01231     { X86::VSUBPDZrr,         X86::VSUBPDZrm,           0 },
01232     { X86::VMULPSZrr,         X86::VMULPSZrm,           0 },
01233     { X86::VMULPDZrr,         X86::VMULPDZrm,           0 },
01234     { X86::VDIVPSZrr,         X86::VDIVPSZrm,           0 },
01235     { X86::VDIVPDZrr,         X86::VDIVPDZrm,           0 },
01236     { X86::VMINPSZrr,         X86::VMINPSZrm,           0 },
01237     { X86::VMINPDZrr,         X86::VMINPDZrm,           0 },
01238     { X86::VMAXPSZrr,         X86::VMAXPSZrm,           0 },
01239     { X86::VMAXPDZrr,         X86::VMAXPDZrm,           0 },
01240     { X86::VPADDDZrr,         X86::VPADDDZrm,           0 },
01241     { X86::VPADDQZrr,         X86::VPADDQZrm,           0 },
01242     { X86::VPERMPDZri,        X86::VPERMPDZmi,          0 },
01243     { X86::VPERMPSZrr,        X86::VPERMPSZrm,          0 },
01244     { X86::VPMAXSDZrr,        X86::VPMAXSDZrm,          0 },
01245     { X86::VPMAXSQZrr,        X86::VPMAXSQZrm,          0 },
01246     { X86::VPMAXUDZrr,        X86::VPMAXUDZrm,          0 },
01247     { X86::VPMAXUQZrr,        X86::VPMAXUQZrm,          0 },
01248     { X86::VPMINSDZrr,        X86::VPMINSDZrm,          0 },
01249     { X86::VPMINSQZrr,        X86::VPMINSQZrm,          0 },
01250     { X86::VPMINUDZrr,        X86::VPMINUDZrm,          0 },
01251     { X86::VPMINUQZrr,        X86::VPMINUQZrm,          0 },
01252     { X86::VPMULDQZrr,        X86::VPMULDQZrm,          0 },
01253     { X86::VPSLLVDZrr,        X86::VPSLLVDZrm,          0 },
01254     { X86::VPSLLVQZrr,        X86::VPSLLVQZrm,          0 },
01255     { X86::VPSRAVDZrr,        X86::VPSRAVDZrm,          0 },
01256     { X86::VPSRLVDZrr,        X86::VPSRLVDZrm,          0 },
01257     { X86::VPSRLVQZrr,        X86::VPSRLVQZrm,          0 },
01258     { X86::VPSUBDZrr,         X86::VPSUBDZrm,           0 },
01259     { X86::VPSUBQZrr,         X86::VPSUBQZrm,           0 },
01260     { X86::VSHUFPDZrri,       X86::VSHUFPDZrmi,         0 },
01261     { X86::VSHUFPSZrri,       X86::VSHUFPSZrmi,         0 },
01262     { X86::VALIGNQrri,        X86::VALIGNQrmi,          0 },
01263     { X86::VALIGNDrri,        X86::VALIGNDrmi,          0 },
01264     { X86::VPMULUDQZrr,       X86::VPMULUDQZrm,         0 },
01265 
01266     // AES foldable instructions
01267     { X86::AESDECLASTrr,      X86::AESDECLASTrm,        TB_ALIGN_16 },
01268     { X86::AESDECrr,          X86::AESDECrm,            TB_ALIGN_16 },
01269     { X86::AESENCLASTrr,      X86::AESENCLASTrm,        TB_ALIGN_16 },
01270     { X86::AESENCrr,          X86::AESENCrm,            TB_ALIGN_16 },
01271     { X86::VAESDECLASTrr,     X86::VAESDECLASTrm,       TB_ALIGN_16 },
01272     { X86::VAESDECrr,         X86::VAESDECrm,           TB_ALIGN_16 },
01273     { X86::VAESENCLASTrr,     X86::VAESENCLASTrm,       TB_ALIGN_16 },
01274     { X86::VAESENCrr,         X86::VAESENCrm,           TB_ALIGN_16 },
01275 
01276     // SHA foldable instructions
01277     { X86::SHA1MSG1rr,        X86::SHA1MSG1rm,          TB_ALIGN_16 },
01278     { X86::SHA1MSG2rr,        X86::SHA1MSG2rm,          TB_ALIGN_16 },
01279     { X86::SHA1NEXTErr,       X86::SHA1NEXTErm,         TB_ALIGN_16 },
01280     { X86::SHA1RNDS4rri,      X86::SHA1RNDS4rmi,        TB_ALIGN_16 },
01281     { X86::SHA256MSG1rr,      X86::SHA256MSG1rm,        TB_ALIGN_16 },
01282     { X86::SHA256MSG2rr,      X86::SHA256MSG2rm,        TB_ALIGN_16 },
01283     { X86::SHA256RNDS2rr,     X86::SHA256RNDS2rm,       TB_ALIGN_16 },
01284   };
01285 
01286   for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
01287     unsigned RegOp = OpTbl2[i].RegOp;
01288     unsigned MemOp = OpTbl2[i].MemOp;
01289     unsigned Flags = OpTbl2[i].Flags;
01290     AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable,
01291                   RegOp, MemOp,
01292                   // Index 2, folded load
01293                   Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
01294   }
01295 
01296   static const X86OpTblEntry OpTbl3[] = {
01297     // FMA foldable instructions
01298     { X86::VFMADDSSr231r,         X86::VFMADDSSr231m,         TB_ALIGN_NONE },
01299     { X86::VFMADDSDr231r,         X86::VFMADDSDr231m,         TB_ALIGN_NONE },
01300     { X86::VFMADDSSr132r,         X86::VFMADDSSr132m,         TB_ALIGN_NONE },
01301     { X86::VFMADDSDr132r,         X86::VFMADDSDr132m,         TB_ALIGN_NONE },
01302     { X86::VFMADDSSr213r,         X86::VFMADDSSr213m,         TB_ALIGN_NONE },
01303     { X86::VFMADDSDr213r,         X86::VFMADDSDr213m,         TB_ALIGN_NONE },
01304 
01305     { X86::VFMADDPSr231r,         X86::VFMADDPSr231m,         TB_ALIGN_NONE },
01306     { X86::VFMADDPDr231r,         X86::VFMADDPDr231m,         TB_ALIGN_NONE },
01307     { X86::VFMADDPSr132r,         X86::VFMADDPSr132m,         TB_ALIGN_NONE },
01308     { X86::VFMADDPDr132r,         X86::VFMADDPDr132m,         TB_ALIGN_NONE },
01309     { X86::VFMADDPSr213r,         X86::VFMADDPSr213m,         TB_ALIGN_NONE },
01310     { X86::VFMADDPDr213r,         X86::VFMADDPDr213m,         TB_ALIGN_NONE },
01311     { X86::VFMADDPSr231rY,        X86::VFMADDPSr231mY,        TB_ALIGN_NONE },
01312     { X86::VFMADDPDr231rY,        X86::VFMADDPDr231mY,        TB_ALIGN_NONE },
01313     { X86::VFMADDPSr132rY,        X86::VFMADDPSr132mY,        TB_ALIGN_NONE },
01314     { X86::VFMADDPDr132rY,        X86::VFMADDPDr132mY,        TB_ALIGN_NONE },
01315     { X86::VFMADDPSr213rY,        X86::VFMADDPSr213mY,        TB_ALIGN_NONE },
01316     { X86::VFMADDPDr213rY,        X86::VFMADDPDr213mY,        TB_ALIGN_NONE },
01317 
01318     { X86::VFNMADDSSr231r,        X86::VFNMADDSSr231m,        TB_ALIGN_NONE },
01319     { X86::VFNMADDSDr231r,        X86::VFNMADDSDr231m,        TB_ALIGN_NONE },
01320     { X86::VFNMADDSSr132r,        X86::VFNMADDSSr132m,        TB_ALIGN_NONE },
01321     { X86::VFNMADDSDr132r,        X86::VFNMADDSDr132m,        TB_ALIGN_NONE },
01322     { X86::VFNMADDSSr213r,        X86::VFNMADDSSr213m,        TB_ALIGN_NONE },
01323     { X86::VFNMADDSDr213r,        X86::VFNMADDSDr213m,        TB_ALIGN_NONE },
01324 
01325     { X86::VFNMADDPSr231r,        X86::VFNMADDPSr231m,        TB_ALIGN_NONE },
01326     { X86::VFNMADDPDr231r,        X86::VFNMADDPDr231m,        TB_ALIGN_NONE },
01327     { X86::VFNMADDPSr132r,        X86::VFNMADDPSr132m,        TB_ALIGN_NONE },
01328     { X86::VFNMADDPDr132r,        X86::VFNMADDPDr132m,        TB_ALIGN_NONE },
01329     { X86::VFNMADDPSr213r,        X86::VFNMADDPSr213m,        TB_ALIGN_NONE },
01330     { X86::VFNMADDPDr213r,        X86::VFNMADDPDr213m,        TB_ALIGN_NONE },
01331     { X86::VFNMADDPSr231rY,       X86::VFNMADDPSr231mY,       TB_ALIGN_NONE },
01332     { X86::VFNMADDPDr231rY,       X86::VFNMADDPDr231mY,       TB_ALIGN_NONE },
01333     { X86::VFNMADDPSr132rY,       X86::VFNMADDPSr132mY,       TB_ALIGN_NONE },
01334     { X86::VFNMADDPDr132rY,       X86::VFNMADDPDr132mY,       TB_ALIGN_NONE },
01335     { X86::VFNMADDPSr213rY,       X86::VFNMADDPSr213mY,       TB_ALIGN_NONE },
01336     { X86::VFNMADDPDr213rY,       X86::VFNMADDPDr213mY,       TB_ALIGN_NONE },
01337 
01338     { X86::VFMSUBSSr231r,         X86::VFMSUBSSr231m,         TB_ALIGN_NONE },
01339     { X86::VFMSUBSDr231r,         X86::VFMSUBSDr231m,         TB_ALIGN_NONE },
01340     { X86::VFMSUBSSr132r,         X86::VFMSUBSSr132m,         TB_ALIGN_NONE },
01341     { X86::VFMSUBSDr132r,         X86::VFMSUBSDr132m,         TB_ALIGN_NONE },
01342     { X86::VFMSUBSSr213r,         X86::VFMSUBSSr213m,         TB_ALIGN_NONE },
01343     { X86::VFMSUBSDr213r,         X86::VFMSUBSDr213m,         TB_ALIGN_NONE },
01344 
01345     { X86::VFMSUBPSr231r,         X86::VFMSUBPSr231m,         TB_ALIGN_NONE },
01346     { X86::VFMSUBPDr231r,         X86::VFMSUBPDr231m,         TB_ALIGN_NONE },
01347     { X86::VFMSUBPSr132r,         X86::VFMSUBPSr132m,         TB_ALIGN_NONE },
01348     { X86::VFMSUBPDr132r,         X86::VFMSUBPDr132m,         TB_ALIGN_NONE },
01349     { X86::VFMSUBPSr213r,         X86::VFMSUBPSr213m,         TB_ALIGN_NONE },
01350     { X86::VFMSUBPDr213r,         X86::VFMSUBPDr213m,         TB_ALIGN_NONE },
01351     { X86::VFMSUBPSr231rY,        X86::VFMSUBPSr231mY,        TB_ALIGN_NONE },
01352     { X86::VFMSUBPDr231rY,        X86::VFMSUBPDr231mY,        TB_ALIGN_NONE },
01353     { X86::VFMSUBPSr132rY,        X86::VFMSUBPSr132mY,        TB_ALIGN_NONE },
01354     { X86::VFMSUBPDr132rY,        X86::VFMSUBPDr132mY,        TB_ALIGN_NONE },
01355     { X86::VFMSUBPSr213rY,        X86::VFMSUBPSr213mY,        TB_ALIGN_NONE },
01356     { X86::VFMSUBPDr213rY,        X86::VFMSUBPDr213mY,        TB_ALIGN_NONE },
01357 
01358     { X86::VFNMSUBSSr231r,        X86::VFNMSUBSSr231m,        TB_ALIGN_NONE },
01359     { X86::VFNMSUBSDr231r,        X86::VFNMSUBSDr231m,        TB_ALIGN_NONE },
01360     { X86::VFNMSUBSSr132r,        X86::VFNMSUBSSr132m,        TB_ALIGN_NONE },
01361     { X86::VFNMSUBSDr132r,        X86::VFNMSUBSDr132m,        TB_ALIGN_NONE },
01362     { X86::VFNMSUBSSr213r,        X86::VFNMSUBSSr213m,        TB_ALIGN_NONE },
01363     { X86::VFNMSUBSDr213r,        X86::VFNMSUBSDr213m,        TB_ALIGN_NONE },
01364 
01365     { X86::VFNMSUBPSr231r,        X86::VFNMSUBPSr231m,        TB_ALIGN_NONE },
01366     { X86::VFNMSUBPDr231r,        X86::VFNMSUBPDr231m,        TB_ALIGN_NONE },
01367     { X86::VFNMSUBPSr132r,        X86::VFNMSUBPSr132m,        TB_ALIGN_NONE },
01368     { X86::VFNMSUBPDr132r,        X86::VFNMSUBPDr132m,        TB_ALIGN_NONE },
01369     { X86::VFNMSUBPSr213r,        X86::VFNMSUBPSr213m,        TB_ALIGN_NONE },
01370     { X86::VFNMSUBPDr213r,        X86::VFNMSUBPDr213m,        TB_ALIGN_NONE },
01371     { X86::VFNMSUBPSr231rY,       X86::VFNMSUBPSr231mY,       TB_ALIGN_NONE },
01372     { X86::VFNMSUBPDr231rY,       X86::VFNMSUBPDr231mY,       TB_ALIGN_NONE },
01373     { X86::VFNMSUBPSr132rY,       X86::VFNMSUBPSr132mY,       TB_ALIGN_NONE },
01374     { X86::VFNMSUBPDr132rY,       X86::VFNMSUBPDr132mY,       TB_ALIGN_NONE },
01375     { X86::VFNMSUBPSr213rY,       X86::VFNMSUBPSr213mY,       TB_ALIGN_NONE },
01376     { X86::VFNMSUBPDr213rY,       X86::VFNMSUBPDr213mY,       TB_ALIGN_NONE },
01377 
01378     { X86::VFMADDSUBPSr231r,      X86::VFMADDSUBPSr231m,      TB_ALIGN_NONE },
01379     { X86::VFMADDSUBPDr231r,      X86::VFMADDSUBPDr231m,      TB_ALIGN_NONE },
01380     { X86::VFMADDSUBPSr132r,      X86::VFMADDSUBPSr132m,      TB_ALIGN_NONE },
01381     { X86::VFMADDSUBPDr132r,      X86::VFMADDSUBPDr132m,      TB_ALIGN_NONE },
01382     { X86::VFMADDSUBPSr213r,      X86::VFMADDSUBPSr213m,      TB_ALIGN_NONE },
01383     { X86::VFMADDSUBPDr213r,      X86::VFMADDSUBPDr213m,      TB_ALIGN_NONE },
01384     { X86::VFMADDSUBPSr231rY,     X86::VFMADDSUBPSr231mY,     TB_ALIGN_NONE },
01385     { X86::VFMADDSUBPDr231rY,     X86::VFMADDSUBPDr231mY,     TB_ALIGN_NONE },
01386     { X86::VFMADDSUBPSr132rY,     X86::VFMADDSUBPSr132mY,     TB_ALIGN_NONE },
01387     { X86::VFMADDSUBPDr132rY,     X86::VFMADDSUBPDr132mY,     TB_ALIGN_NONE },
01388     { X86::VFMADDSUBPSr213rY,     X86::VFMADDSUBPSr213mY,     TB_ALIGN_NONE },
01389     { X86::VFMADDSUBPDr213rY,     X86::VFMADDSUBPDr213mY,     TB_ALIGN_NONE },
01390 
01391     { X86::VFMSUBADDPSr231r,      X86::VFMSUBADDPSr231m,      TB_ALIGN_NONE },
01392     { X86::VFMSUBADDPDr231r,      X86::VFMSUBADDPDr231m,      TB_ALIGN_NONE },
01393     { X86::VFMSUBADDPSr132r,      X86::VFMSUBADDPSr132m,      TB_ALIGN_NONE },
01394     { X86::VFMSUBADDPDr132r,      X86::VFMSUBADDPDr132m,      TB_ALIGN_NONE },
01395     { X86::VFMSUBADDPSr213r,      X86::VFMSUBADDPSr213m,      TB_ALIGN_NONE },
01396     { X86::VFMSUBADDPDr213r,      X86::VFMSUBADDPDr213m,      TB_ALIGN_NONE },
01397     { X86::VFMSUBADDPSr231rY,     X86::VFMSUBADDPSr231mY,     TB_ALIGN_NONE },
01398     { X86::VFMSUBADDPDr231rY,     X86::VFMSUBADDPDr231mY,     TB_ALIGN_NONE },
01399     { X86::VFMSUBADDPSr132rY,     X86::VFMSUBADDPSr132mY,     TB_ALIGN_NONE },
01400     { X86::VFMSUBADDPDr132rY,     X86::VFMSUBADDPDr132mY,     TB_ALIGN_NONE },
01401     { X86::VFMSUBADDPSr213rY,     X86::VFMSUBADDPSr213mY,     TB_ALIGN_NONE },
01402     { X86::VFMSUBADDPDr213rY,     X86::VFMSUBADDPDr213mY,     TB_ALIGN_NONE },
01403 
01404     // FMA4 foldable patterns
01405     { X86::VFMADDSS4rr,           X86::VFMADDSS4rm,           0           },
01406     { X86::VFMADDSD4rr,           X86::VFMADDSD4rm,           0           },
01407     { X86::VFMADDPS4rr,           X86::VFMADDPS4rm,           TB_ALIGN_16 },
01408     { X86::VFMADDPD4rr,           X86::VFMADDPD4rm,           TB_ALIGN_16 },
01409     { X86::VFMADDPS4rrY,          X86::VFMADDPS4rmY,          TB_ALIGN_32 },
01410     { X86::VFMADDPD4rrY,          X86::VFMADDPD4rmY,          TB_ALIGN_32 },
01411     { X86::VFNMADDSS4rr,          X86::VFNMADDSS4rm,          0           },
01412     { X86::VFNMADDSD4rr,          X86::VFNMADDSD4rm,          0           },
01413     { X86::VFNMADDPS4rr,          X86::VFNMADDPS4rm,          TB_ALIGN_16 },
01414     { X86::VFNMADDPD4rr,          X86::VFNMADDPD4rm,          TB_ALIGN_16 },
01415     { X86::VFNMADDPS4rrY,         X86::VFNMADDPS4rmY,         TB_ALIGN_32 },
01416     { X86::VFNMADDPD4rrY,         X86::VFNMADDPD4rmY,         TB_ALIGN_32 },
01417     { X86::VFMSUBSS4rr,           X86::VFMSUBSS4rm,           0           },
01418     { X86::VFMSUBSD4rr,           X86::VFMSUBSD4rm,           0           },
01419     { X86::VFMSUBPS4rr,           X86::VFMSUBPS4rm,           TB_ALIGN_16 },
01420     { X86::VFMSUBPD4rr,           X86::VFMSUBPD4rm,           TB_ALIGN_16 },
01421     { X86::VFMSUBPS4rrY,          X86::VFMSUBPS4rmY,          TB_ALIGN_32 },
01422     { X86::VFMSUBPD4rrY,          X86::VFMSUBPD4rmY,          TB_ALIGN_32 },
01423     { X86::VFNMSUBSS4rr,          X86::VFNMSUBSS4rm,          0           },
01424     { X86::VFNMSUBSD4rr,          X86::VFNMSUBSD4rm,          0           },
01425     { X86::VFNMSUBPS4rr,          X86::VFNMSUBPS4rm,          TB_ALIGN_16 },
01426     { X86::VFNMSUBPD4rr,          X86::VFNMSUBPD4rm,          TB_ALIGN_16 },
01427     { X86::VFNMSUBPS4rrY,         X86::VFNMSUBPS4rmY,         TB_ALIGN_32 },
01428     { X86::VFNMSUBPD4rrY,         X86::VFNMSUBPD4rmY,         TB_ALIGN_32 },
01429     { X86::VFMADDSUBPS4rr,        X86::VFMADDSUBPS4rm,        TB_ALIGN_16 },
01430     { X86::VFMADDSUBPD4rr,        X86::VFMADDSUBPD4rm,        TB_ALIGN_16 },
01431     { X86::VFMADDSUBPS4rrY,       X86::VFMADDSUBPS4rmY,       TB_ALIGN_32 },
01432     { X86::VFMADDSUBPD4rrY,       X86::VFMADDSUBPD4rmY,       TB_ALIGN_32 },
01433     { X86::VFMSUBADDPS4rr,        X86::VFMSUBADDPS4rm,        TB_ALIGN_16 },
01434     { X86::VFMSUBADDPD4rr,        X86::VFMSUBADDPD4rm,        TB_ALIGN_16 },
01435     { X86::VFMSUBADDPS4rrY,       X86::VFMSUBADDPS4rmY,       TB_ALIGN_32 },
01436     { X86::VFMSUBADDPD4rrY,       X86::VFMSUBADDPD4rmY,       TB_ALIGN_32 },
01437     // AVX-512 VPERMI instructions with 3 source operands.
01438     { X86::VPERMI2Drr,            X86::VPERMI2Drm,            0 },
01439     { X86::VPERMI2Qrr,            X86::VPERMI2Qrm,            0 },
01440     { X86::VPERMI2PSrr,           X86::VPERMI2PSrm,           0 },
01441     { X86::VPERMI2PDrr,           X86::VPERMI2PDrm,           0 },
01442     { X86::VBLENDMPDZrr,          X86::VBLENDMPDZrm,          0 },
01443     { X86::VBLENDMPSZrr,          X86::VBLENDMPSZrm,          0 },
01444     { X86::VPBLENDMDZrr,          X86::VPBLENDMDZrm,          0 },
01445     { X86::VPBLENDMQZrr,          X86::VPBLENDMQZrm,          0 }
01446   };
01447 
01448   for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
01449     unsigned RegOp = OpTbl3[i].RegOp;
01450     unsigned MemOp = OpTbl3[i].MemOp;
01451     unsigned Flags = OpTbl3[i].Flags;
01452     AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
01453                   RegOp, MemOp,
01454                   // Index 3, folded load
01455                   Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
01456   }
01457 
01458 }
01459 
01460 void
01461 X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable,
01462                             MemOp2RegOpTableType &M2RTable,
01463                             unsigned RegOp, unsigned MemOp, unsigned Flags) {
01464     if ((Flags & TB_NO_FORWARD) == 0) {
01465       assert(!R2MTable.count(RegOp) && "Duplicate entry!");
01466       R2MTable[RegOp] = std::make_pair(MemOp, Flags);
01467     }
01468     if ((Flags & TB_NO_REVERSE) == 0) {
01469       assert(!M2RTable.count(MemOp) &&
01470            "Duplicated entries in unfolding maps?");
01471       M2RTable[MemOp] = std::make_pair(RegOp, Flags);
01472     }
01473 }
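
// Illustrative sketch (not part of the original file): the dual-map
// registration performed by AddTableEntry above, restated with
// std::unordered_map and plain unsigneds so it can be read in isolation.
// Every "Example" name and the two flag bits below are stand-ins invented
// for this sketch, not the real table flags.
#include <unordered_map>
#include <utility>

enum : unsigned {
  ExampleNoForward = 1u << 0,   // suppress the RegOp -> MemOp (folding) entry
  ExampleNoReverse = 1u << 1    // suppress the MemOp -> RegOp (unfolding) entry
};
typedef std::unordered_map<unsigned, std::pair<unsigned, unsigned> > ExampleFoldMap;

static void addExampleFoldEntry(ExampleFoldMap &Reg2Mem, ExampleFoldMap &Mem2Reg,
                                unsigned RegOp, unsigned MemOp, unsigned Flags) {
  if ((Flags & ExampleNoForward) == 0)
    Reg2Mem[RegOp] = std::make_pair(MemOp, Flags);   // folding direction
  if ((Flags & ExampleNoReverse) == 0)
    Mem2Reg[MemOp] = std::make_pair(RegOp, Flags);   // unfolding direction
}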
01474 
01475 bool
01476 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
01477                                     unsigned &SrcReg, unsigned &DstReg,
01478                                     unsigned &SubIdx) const {
01479   switch (MI.getOpcode()) {
01480   default: break;
01481   case X86::MOVSX16rr8:
01482   case X86::MOVZX16rr8:
01483   case X86::MOVSX32rr8:
01484   case X86::MOVZX32rr8:
01485   case X86::MOVSX64rr8:
01486     if (!Subtarget.is64Bit())
01487       // It's not always legal to reference the low 8 bits of the larger
01488       // register in 32-bit mode.
01489       return false;
01490   case X86::MOVSX32rr16:
01491   case X86::MOVZX32rr16:
01492   case X86::MOVSX64rr16:
01493   case X86::MOVSX64rr32: {
01494     if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
01495       // Be conservative.
01496       return false;
01497     SrcReg = MI.getOperand(1).getReg();
01498     DstReg = MI.getOperand(0).getReg();
01499     switch (MI.getOpcode()) {
01500     default: llvm_unreachable("Unreachable!");
01501     case X86::MOVSX16rr8:
01502     case X86::MOVZX16rr8:
01503     case X86::MOVSX32rr8:
01504     case X86::MOVZX32rr8:
01505     case X86::MOVSX64rr8:
01506       SubIdx = X86::sub_8bit;
01507       break;
01508     case X86::MOVSX32rr16:
01509     case X86::MOVZX32rr16:
01510     case X86::MOVSX64rr16:
01511       SubIdx = X86::sub_16bit;
01512       break;
01513     case X86::MOVSX64rr32:
01514       SubIdx = X86::sub_32bit;
01515       break;
01516     }
01517     return true;
01518   }
01519   }
01520   return false;
01521 }
01522 
01523 /// isFrameOperand - Return true and the FrameIndex if the specified
01524 /// operand and the following operands form a reference to the stack frame.
01525 bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
01526                                   int &FrameIndex) const {
01527   if (MI->getOperand(Op+X86::AddrBaseReg).isFI() &&
01528       MI->getOperand(Op+X86::AddrScaleAmt).isImm() &&
01529       MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
01530       MI->getOperand(Op+X86::AddrDisp).isImm() &&
01531       MI->getOperand(Op+X86::AddrScaleAmt).getImm() == 1 &&
01532       MI->getOperand(Op+X86::AddrIndexReg).getReg() == 0 &&
01533       MI->getOperand(Op+X86::AddrDisp).getImm() == 0) {
01534     FrameIndex = MI->getOperand(Op+X86::AddrBaseReg).getIndex();
01535     return true;
01536   }
01537   return false;
01538 }
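
// Illustrative sketch (not part of the original file): the addressing-mode
// shape accepted by isFrameOperand above, restated over a toy address record.
// The struct and function names are hypothetical; only the conditions
// (frame-index base, scale 1, no index register, zero displacement) mirror
// the code above.
struct ExampleX86Address {
  bool     BaseIsFrameIndex;
  int      FrameIndex;
  int      ScaleAmt;
  unsigned IndexReg;   // 0 means "no index register"
  int      Disp;
};

static bool isSimpleFrameReferenceExample(const ExampleX86Address &Addr,
                                          int &FrameIndex) {
  if (Addr.BaseIsFrameIndex && Addr.ScaleAmt == 1 && Addr.IndexReg == 0 &&
      Addr.Disp == 0) {
    FrameIndex = Addr.FrameIndex;
    return true;
  }
  return false;
}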
01539 
01540 static bool isFrameLoadOpcode(int Opcode) {
01541   switch (Opcode) {
01542   default:
01543     return false;
01544   case X86::MOV8rm:
01545   case X86::MOV16rm:
01546   case X86::MOV32rm:
01547   case X86::MOV64rm:
01548   case X86::LD_Fp64m:
01549   case X86::MOVSSrm:
01550   case X86::MOVSDrm:
01551   case X86::MOVAPSrm:
01552   case X86::MOVAPDrm:
01553   case X86::MOVDQArm:
01554   case X86::VMOVSSrm:
01555   case X86::VMOVSDrm:
01556   case X86::VMOVAPSrm:
01557   case X86::VMOVAPDrm:
01558   case X86::VMOVDQArm:
01559   case X86::VMOVAPSYrm:
01560   case X86::VMOVAPDYrm:
01561   case X86::VMOVDQAYrm:
01562   case X86::MMX_MOVD64rm:
01563   case X86::MMX_MOVQ64rm:
01564   case X86::VMOVAPSZrm:
01565   case X86::VMOVUPSZrm:
01566     return true;
01567   }
01568 }
01569 
01570 static bool isFrameStoreOpcode(int Opcode) {
01571   switch (Opcode) {
01572   default: break;
01573   case X86::MOV8mr:
01574   case X86::MOV16mr:
01575   case X86::MOV32mr:
01576   case X86::MOV64mr:
01577   case X86::ST_FpP64m:
01578   case X86::MOVSSmr:
01579   case X86::MOVSDmr:
01580   case X86::MOVAPSmr:
01581   case X86::MOVAPDmr:
01582   case X86::MOVDQAmr:
01583   case X86::VMOVSSmr:
01584   case X86::VMOVSDmr:
01585   case X86::VMOVAPSmr:
01586   case X86::VMOVAPDmr:
01587   case X86::VMOVDQAmr:
01588   case X86::VMOVAPSYmr:
01589   case X86::VMOVAPDYmr:
01590   case X86::VMOVDQAYmr:
01591   case X86::VMOVUPSZmr:
01592   case X86::VMOVAPSZmr:
01593   case X86::MMX_MOVD64mr:
01594   case X86::MMX_MOVQ64mr:
01595   case X86::MMX_MOVNTQmr:
01596     return true;
01597   }
01598   return false;
01599 }
01600 
01601 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
01602                                            int &FrameIndex) const {
01603   if (isFrameLoadOpcode(MI->getOpcode()))
01604     if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
01605       return MI->getOperand(0).getReg();
01606   return 0;
01607 }
01608 
01609 unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
01610                                                  int &FrameIndex) const {
01611   if (isFrameLoadOpcode(MI->getOpcode())) {
01612     unsigned Reg;
01613     if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
01614       return Reg;
01615     // Check for post-frame index elimination operations
01616     const MachineMemOperand *Dummy;
01617     return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
01618   }
01619   return 0;
01620 }
01621 
01622 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
01623                                           int &FrameIndex) const {
01624   if (isFrameStoreOpcode(MI->getOpcode()))
01625     if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
01626         isFrameOperand(MI, 0, FrameIndex))
01627       return MI->getOperand(X86::AddrNumOperands).getReg();
01628   return 0;
01629 }
01630 
01631 unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
01632                                                 int &FrameIndex) const {
01633   if (isFrameStoreOpcode(MI->getOpcode())) {
01634     unsigned Reg;
01635     if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
01636       return Reg;
01637     // Check for post-frame index elimination operations
01638     const MachineMemOperand *Dummy;
01639     return hasStoreToStackSlot(MI, Dummy, FrameIndex);
01640   }
01641   return 0;
01642 }
01643 
01644 /// regIsPICBase - Return true if the register is a PIC base (i.e. it is
01645 /// defined by X86::MOVPC32r).
01646 static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
01647   // Don't waste compile time scanning use-def chains of physregs.
01648   if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
01649     return false;
01650   bool isPICBase = false;
01651   for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
01652          E = MRI.def_instr_end(); I != E; ++I) {
01653     MachineInstr *DefMI = &*I;
01654     if (DefMI->getOpcode() != X86::MOVPC32r)
01655       return false;
01656     assert(!isPICBase && "More than one PIC base?");
01657     isPICBase = true;
01658   }
01659   return isPICBase;
01660 }
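
// Illustrative sketch (not part of the original file): the "every definition
// is the PIC set-up instruction" scan performed by regIsPICBase above,
// restated with std::all_of over a plain list of defining opcodes.  The
// function name and parameters are hypothetical.
#include <algorithm>
#include <vector>

static bool allDefsArePICSetupExample(const std::vector<unsigned> &DefOpcodes,
                                      unsigned PICSetupOpcode) {
  // At least one definition must exist, and every one of them must be the
  // PIC set-up opcode (the real code also asserts there is only one).
  return !DefOpcodes.empty() &&
         std::all_of(DefOpcodes.begin(), DefOpcodes.end(),
                     [=](unsigned Opc) { return Opc == PICSetupOpcode; });
}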
01661 
01662 bool
01663 X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
01664                                                 AliasAnalysis *AA) const {
01665   switch (MI->getOpcode()) {
01666   default: break;
01667   case X86::MOV8rm:
01668   case X86::MOV16rm:
01669   case X86::MOV32rm:
01670   case X86::MOV64rm:
01671   case X86::LD_Fp64m:
01672   case X86::MOVSSrm:
01673   case X86::MOVSDrm:
01674   case X86::MOVAPSrm:
01675   case X86::MOVUPSrm:
01676   case X86::MOVAPDrm:
01677   case X86::MOVDQArm:
01678   case X86::MOVDQUrm:
01679   case X86::VMOVSSrm:
01680   case X86::VMOVSDrm:
01681   case X86::VMOVAPSrm:
01682   case X86::VMOVUPSrm:
01683   case X86::VMOVAPDrm:
01684   case X86::VMOVDQArm:
01685   case X86::VMOVDQUrm:
01686   case X86::VMOVAPSYrm:
01687   case X86::VMOVUPSYrm:
01688   case X86::VMOVAPDYrm:
01689   case X86::VMOVDQAYrm:
01690   case X86::VMOVDQUYrm:
01691   case X86::MMX_MOVD64rm:
01692   case X86::MMX_MOVQ64rm:
01693   case X86::FsVMOVAPSrm:
01694   case X86::FsVMOVAPDrm:
01695   case X86::FsMOVAPSrm:
01696   case X86::FsMOVAPDrm: {
01697     // Loads from constant pools are trivially rematerializable.
01698     if (MI->getOperand(1+X86::AddrBaseReg).isReg() &&
01699         MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
01700         MI->getOperand(1+X86::AddrIndexReg).isReg() &&
01701         MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
01702         MI->isInvariantLoad(AA)) {
01703       unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
01704       if (BaseReg == 0 || BaseReg == X86::RIP)
01705         return true;
01706       // Allow re-materialization of PIC load.
01707       if (!ReMatPICStubLoad && MI->getOperand(1+X86::AddrDisp).isGlobal())
01708         return false;
01709       const MachineFunction &MF = *MI->getParent()->getParent();
01710       const MachineRegisterInfo &MRI = MF.getRegInfo();
01711       return regIsPICBase(BaseReg, MRI);
01712     }
01713     return false;
01714   }
01715 
01716   case X86::LEA32r:
01717   case X86::LEA64r: {
01718     if (MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
01719         MI->getOperand(1+X86::AddrIndexReg).isReg() &&
01720         MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
01721         !MI->getOperand(1+X86::AddrDisp).isReg()) {
01722       // lea fi#, lea GV, etc. are all rematerializable.
01723       if (!MI->getOperand(1+X86::AddrBaseReg).isReg())
01724         return true;
01725       unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
01726       if (BaseReg == 0)
01727         return true;
01728       // Allow re-materialization of lea PICBase + x.
01729       const MachineFunction &MF = *MI->getParent()->getParent();
01730       const MachineRegisterInfo &MRI = MF.getRegInfo();
01731       return regIsPICBase(BaseReg, MRI);
01732     }
01733     return false;
01734   }
01735   }
01736 
01737   // All other instructions marked M_REMATERIALIZABLE are always trivially
01738   // rematerializable.
01739   return true;
01740 }
01741 
01742 bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
01743                                          MachineBasicBlock::iterator I) const {
01744   MachineBasicBlock::iterator E = MBB.end();
01745 
01746   // To keep compile time down, if we are not able to determine the
01747   // safety after visiting 4 instructions in each direction, we will assume
01748   // it's not safe.
01749   MachineBasicBlock::iterator Iter = I;
01750   for (unsigned i = 0; Iter != E && i < 4; ++i) {
01751     bool SeenDef = false;
01752     for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
01753       MachineOperand &MO = Iter->getOperand(j);
01754       if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
01755         SeenDef = true;
01756       if (!MO.isReg())
01757         continue;
01758       if (MO.getReg() == X86::EFLAGS) {
01759         if (MO.isUse())
01760           return false;
01761         SeenDef = true;
01762       }
01763     }
01764 
01765     if (SeenDef)
01766       // This instruction defines EFLAGS; no need to look any further.
01767       return true;
01768     ++Iter;
01769     // Skip over DBG_VALUE.
01770     while (Iter != E && Iter->isDebugValue())
01771       ++Iter;
01772   }
01773 
01774   // It is safe to clobber EFLAGS at the end of a block if no successor has it
01775   // live in.
01776   if (Iter == E) {
01777     for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
01778            SE = MBB.succ_end(); SI != SE; ++SI)
01779       if ((*SI)->isLiveIn(X86::EFLAGS))
01780         return false;
01781     return true;
01782   }
01783 
01784   MachineBasicBlock::iterator B = MBB.begin();
01785   Iter = I;
01786   for (unsigned i = 0; i < 4; ++i) {
01787     // If we make it to the beginning of the block, it's safe to clobber
01788     // EFLAGS iff EFLAGS is not live-in.
01789     if (Iter == B)
01790       return !MBB.isLiveIn(X86::EFLAGS);
01791 
01792     --Iter;
01793     // Skip over DBG_VALUE.
01794     while (Iter != B && Iter->isDebugValue())
01795       --Iter;
01796 
01797     bool SawKill = false;
01798     for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
01799       MachineOperand &MO = Iter->getOperand(j);
01800       // A register mask may clobber EFLAGS, but we should still look for a
01801       // live EFLAGS def.
01802       if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
01803         SawKill = true;
01804       if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
01805         if (MO.isDef()) return MO.isDead();
01806         if (MO.isKill()) SawKill = true;
01807       }
01808     }
01809 
01810     if (SawKill)
01811       // This instruction kills EFLAGS and doesn't redefine it, so
01812       // there's no need to look further.
01813       return true;
01814   }
01815 
01816   // Conservative answer.
01817   return false;
01818 }
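
// Illustrative sketch (not part of the original file): the bounded
// forward/backward scan used by isSafeToClobberEFLAGS above, restated over a
// toy block model.  Every name here is hypothetical; only the decision rules
// (reader before writer, dead def, kill, live-in / live-out checks, and the
// four-instruction budget) are taken from the code above.
#include <vector>

struct ExampleFlagsInst {
  bool ReadsFlags;      // the instruction uses EFLAGS
  bool WritesFlags;     // the instruction defines (or regmask-clobbers) EFLAGS
  bool FlagsDefIsDead;  // its EFLAGS def (if any) is marked dead
  bool KillsFlags;      // it is the last use of the incoming EFLAGS value
};

struct ExampleBlock {
  std::vector<ExampleFlagsInst> Insts;
  bool FlagsLiveIn;        // EFLAGS is live into this block
  bool FlagsLiveIntoSucc;  // EFLAGS is live into some successor
};

static bool isSafeToClobberFlagsExample(const ExampleBlock &BB, unsigned Idx) {
  // Forward scan: a reader seen before any writer makes clobbering unsafe,
  // a writer makes it safe.  Give up after four instructions.
  unsigned i = Idx;
  for (unsigned n = 0; n < 4 && i < BB.Insts.size(); ++n, ++i) {
    if (BB.Insts[i].ReadsFlags)
      return false;
    if (BB.Insts[i].WritesFlags)
      return true;
  }
  if (i == BB.Insts.size())
    return !BB.FlagsLiveIntoSucc;  // fell off the end of the block

  // Backward scan: a dead def or a kill means the incoming value is unused.
  i = Idx;
  for (unsigned n = 0; n < 4; ++n) {
    if (i == 0)
      return !BB.FlagsLiveIn;      // reached the top of the block
    --i;
    if (BB.Insts[i].WritesFlags)
      return BB.Insts[i].FlagsDefIsDead;
    if (BB.Insts[i].KillsFlags)
      return true;
  }
  return false;                    // conservative answer
}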
01819 
01820 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
01821                                  MachineBasicBlock::iterator I,
01822                                  unsigned DestReg, unsigned SubIdx,
01823                                  const MachineInstr *Orig,
01824                                  const TargetRegisterInfo &TRI) const {
01825   // MOV32r0 is implemented with an xor which clobbers the condition codes.
01826   // Re-materialize it as a MOV32ri to avoid the side effects.
01827   unsigned Opc = Orig->getOpcode();
01828   if (Opc == X86::MOV32r0 && !isSafeToClobberEFLAGS(MBB, I)) {
01829     DebugLoc DL = Orig->getDebugLoc();
01830     BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
01831       .addImm(0);
01832   } else {
01833     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
01834     MBB.insert(I, MI);
01835   }
01836 
01837   MachineInstr *NewMI = std::prev(I);
01838   NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
01839 }
01840 
01841 /// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
01842 /// is not marked dead.
01843 static bool hasLiveCondCodeDef(MachineInstr *MI) {
01844   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
01845     MachineOperand &MO = MI->getOperand(i);
01846     if (MO.isReg() && MO.isDef() &&
01847         MO.getReg() == X86::EFLAGS && !MO.isDead()) {
01848       return true;
01849     }
01850   }
01851   return false;
01852 }
01853 
01854 /// getTruncatedShiftCount - return the shift count of a machine instruction,
01855 /// truncated to the number of bits the hardware actually honors.
01856 inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
01857                                               unsigned ShiftAmtOperandIdx) {
01858   // The shift count is six bits with the REX.W prefix and five bits without.
01859   unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
01860   unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
01861   return Imm & ShiftCountMask;
01862 }
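
// Illustrative sketch (not part of the original file): the masking rule used
// above, restated as a standalone function over plain integers.  The function
// name and the sample values below are hypothetical.
static inline unsigned truncateShiftCountExample(unsigned long long Imm,
                                                 bool HasREXW) {
  // Hardware honors six bits of the count for 64-bit shifts (REX.W) and five
  // bits otherwise, so larger immediates wrap around.
  unsigned ShiftCountMask = HasREXW ? 63 : 31;
  return static_cast<unsigned>(Imm) & ShiftCountMask;
}
// For example, truncateShiftCountExample(35, false) yields 3 (35 & 31), while
// truncateShiftCountExample(35, true) yields 35 (35 & 63).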
01863 
01864 /// isTruncatedShiftCountForLEA - check whether the given truncated shift count
01865 /// can be represented by the scale factor of a LEA instruction.
01866 inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
01867   // Left shift instructions can be transformed into load-effective-address
01868   // instructions if we can encode them appropriately.
01869   // A LEA instruction utilizes a SIB byte to encode its scale factor.
01870   // The SIB.scale field is two bits wide, which means that we can encode any
01871   // shift amount less than 4.
01872   return ShAmt < 4 && ShAmt > 0;
01873 }
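
// Illustrative sketch (not part of the original file): why only shift amounts
// 1 through 3 pass the check above.  The helper name is hypothetical; it maps
// a left-shift amount to the equivalent LEA scale factor, which the two-bit
// SIB.scale field can only express as 1, 2, 4 or 8.
static inline bool shiftAmountToLEAScaleExample(unsigned ShAmt,
                                                unsigned &Scale) {
  if (ShAmt == 0 || ShAmt >= 4)
    return false;        // zero shifts need no LEA; a scale of 16+ can't be encoded
  Scale = 1u << ShAmt;   // shl $1 -> scale 2, shl $2 -> scale 4, shl $3 -> scale 8
  return true;
}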
01874 
01875 bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
01876                                   unsigned Opc, bool AllowSP,
01877                                   unsigned &NewSrc, bool &isKill, bool &isUndef,
01878                                   MachineOperand &ImplicitOp) const {
01879   MachineFunction &MF = *MI->getParent()->getParent();
01880   const TargetRegisterClass *RC;
01881   if (AllowSP) {
01882     RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
01883   } else {
01884     RC = Opc != X86::LEA32r ?
01885       &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
01886   }
01887   unsigned SrcReg = Src.getReg();
01888 
01889   // For both LEA64 and LEA32, the register already has essentially the right
01890   // type (32-bit or 64-bit); we may just need to forbid SP.
01891   if (Opc != X86::LEA64_32r) {
01892     NewSrc = SrcReg;
01893     isKill = Src.isKill();
01894     isUndef = Src.isUndef();
01895 
01896     if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
01897         !MF.getRegInfo().constrainRegClass(NewSrc, RC))
01898       return false;
01899 
01900     return true;
01901   }
01902 
01903   // This is for an LEA64_32r, so the incoming registers are 32-bit. One way or
01904   // another we need to add 64-bit registers to the final MI.
01905   if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
01906     ImplicitOp = Src;
01907     ImplicitOp.setImplicit();
01908 
01909     NewSrc = getX86SubSuperRegister(Src.getReg(), MVT::i64);
01910     MachineBasicBlock::LivenessQueryResult LQR =
01911       MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
01912 
01913     switch (LQR) {
01914     case MachineBasicBlock::LQR_Unknown:
01915       // We can't give sane liveness flags to the instruction, abandon LEA
01916       // formation.
01917       return false;
01918     case MachineBasicBlock::LQR_Live:
01919       isKill = MI->killsRegister(SrcReg);
01920       isUndef = false;
01921       break;
01922     default:
01923       // The physreg itself is dead, so we have to use it as an <undef>.
01924       isKill = false;
01925       isUndef = true;
01926       break;
01927     }
01928   } else {
01929     // Virtual register of the wrong class; we have to create a temporary 64-bit
01930     // vreg to feed into the LEA.
01931     NewSrc = MF.getRegInfo().createVirtualRegister(RC);
01932     BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
01933             get(TargetOpcode::COPY))
01934       .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
01935         .addOperand(Src);
01936 
01937     // Which is obviously going to be dead after we're done with it.
01938     isKill = true;
01939     isUndef = false;
01940   }
01941 
01942   // We've set all the parameters without issue.
01943   return true;
01944 }
01945 
01946 /// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
01947 /// 16-bit LEA is disabled: use a 32-bit LEA to form 3-address code by promoting
01948 /// to a 32-bit superregister and then truncating back down to a 16-bit
01949 /// subregister.
01950 MachineInstr *
01951 X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
01952                                            MachineFunction::iterator &MFI,
01953                                            MachineBasicBlock::iterator &MBBI,
01954                                            LiveVariables *LV) const {
01955   MachineInstr *MI = MBBI;
01956   unsigned Dest = MI->getOperand(0).getReg();
01957   unsigned Src = MI->getOperand(1).getReg();
01958   bool isDead = MI->getOperand(0).isDead();
01959   bool isKill = MI->getOperand(1).isKill();
01960 
01961   MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
01962   unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
01963   unsigned Opc, leaInReg;
01964   if (Subtarget.is64Bit()) {
01965     Opc = X86::LEA64_32r;
01966     leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
01967   } else {
01968     Opc = X86::LEA32r;
01969     leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
01970   }
01971 
01972   // Build and insert into an implicit UNDEF value. This is OK because
01973   // we'll be shifting and then extracting the lower 16 bits.
01974   // This has the potential to cause a partial register stall, e.g.:
01975   //   movw    (%rbp,%rcx,2), %dx
01976   //   leal    -65(%rdx), %esi
01977   // But testing has shown this *does* help performance in 64-bit mode (at
01978   // least on modern x86 machines).
01979   BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
01980   MachineInstr *InsMI =
01981     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
01982     .addReg(leaInReg, RegState::Define, X86::sub_16bit)
01983     .addReg(Src, getKillRegState(isKill));
01984 
01985   MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
01986                                     get(Opc), leaOutReg);
01987   switch (MIOpc) {
01988   default: llvm_unreachable("Unreachable!");
01989   case X86::SHL16ri: {
01990     unsigned ShAmt = MI->getOperand(2).getImm();
01991     MIB.addReg(0).addImm(1 << ShAmt)
01992        .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
01993     break;
01994   }
01995   case X86::INC16r:
01996   case X86::INC64_16r:
01997     addRegOffset(MIB, leaInReg, true, 1);
01998     break;
01999   case X86::DEC16r:
02000   case X86::DEC64_16r:
02001     addRegOffset(MIB, leaInReg, true, -1);
02002     break;
02003   case X86::ADD16ri:
02004   case X86::ADD16ri8:
02005   case X86::ADD16ri_DB:
02006   case X86::ADD16ri8_DB:
02007     addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
02008     break;
02009   case X86::ADD16rr:
02010   case X86::ADD16rr_DB: {
02011     unsigned Src2 = MI->getOperand(2).getReg();
02012     bool isKill2 = MI->getOperand(2).isKill();
02013     unsigned leaInReg2 = 0;
02014     MachineInstr *InsMI2 = nullptr;
02015     if (Src == Src2) {
02016       // The two source operands are identical (e.g. ADD16rr %reg1028<kill>,
02017       // %reg1028), so a single insert_subreg suffices.
02018       addRegReg(MIB, leaInReg, true, leaInReg, false);
02019     } else {
02020       if (Subtarget.is64Bit())
02021         leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
02022       else
02023         leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
02024       // Build an implicit UNDEF value and insert the 16-bit source into it. This
02025       // is OK because we'll only be using and then extracting the lower 16 bits.
02026       BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2);
02027       InsMI2 =
02028         BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
02029         .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
02030         .addReg(Src2, getKillRegState(isKill2));
02031       addRegReg(MIB, leaInReg, true, leaInReg2, true);
02032     }
02033     if (LV && isKill2 && InsMI2)
02034       LV->replaceKillInstruction(Src2, MI, InsMI2);
02035     break;
02036   }
02037   }
02038 
02039   MachineInstr *NewMI = MIB;
02040   MachineInstr *ExtMI =
02041     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
02042     .addReg(Dest, RegState::Define | getDeadRegState(isDead))
02043     .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
02044 
02045   if (LV) {
02046     // Update live variables
02047     LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
02048     LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
02049     if (isKill)
02050       LV->replaceKillInstruction(Src, MI, InsMI);
02051     if (isDead)
02052       LV->replaceKillInstruction(Dest, MI, ExtMI);
02053   }
02054 
02055   return ExtMI;
02056 }
02057 
02058 /// convertToThreeAddress - This method must be implemented by targets that
02059 /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
02060 /// may be able to convert a two-address instruction into a true
02061 /// three-address instruction on demand.  This allows the X86 target (for
02062 /// example) to convert ADD and SHL instructions into LEA instructions if they
02063 /// would require register copies due to two-addressness.
02064 ///
02065 /// This method returns a null pointer if the transformation cannot be
02066 /// performed, otherwise it returns the new instruction.
02067 ///
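/// A rough illustration (virtual register names are invented): the two-address
///   %c = ADD32rr %a, %b       (%c must be allocated to the same register as %a)
/// can instead be emitted as the three-address
///   %c = LEA32r %a, 1, %b, 0, %noreg       (LEA64_32r in 64-bit mode)
/// so the register allocator does not need an extra copy to keep %a live.
///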
02068 MachineInstr *
02069 X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
02070                                     MachineBasicBlock::iterator &MBBI,
02071                                     LiveVariables *LV) const {
02072   MachineInstr *MI = MBBI;
02073 
02074   // The following opcodes also set the condition code register(s). Only
02075   // convert them to an equivalent LEA if the condition code register defs
02076   // are dead!
02077   if (hasLiveCondCodeDef(MI))
02078     return nullptr;
02079 
02080   MachineFunction &MF = *MI->getParent()->getParent();
02081   // All input instructions are two-address instructions.  Get the known operands.
02082   const MachineOperand &Dest = MI->getOperand(0);
02083   const MachineOperand &Src = MI->getOperand(1);
02084 
02085   MachineInstr *NewMI = nullptr;
02086   // FIXME: 16-bit LEAs are really slow on Athlons, but not bad on P4s.  When
02087   // we have better subtarget support, enable the 16-bit LEA generation here.
02088   // 16-bit LEA is also slow on Core2.
02089   bool DisableLEA16 = true;
02090   bool is64Bit = Subtarget.is64Bit();
02091 
02092   unsigned MIOpc = MI->getOpcode();
02093   switch (MIOpc) {
02094   case X86::SHUFPSrri: {
02095     assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
02096     if (!Subtarget.hasSSE2()) return nullptr;
02097 
02098     unsigned B = MI->getOperand(1).getReg();
02099     unsigned C = MI->getOperand(2).getReg();
02100     if (B != C) return nullptr;
02101     unsigned M = MI->getOperand(3).getImm();
02102     NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
02103       .addOperand(Dest).addOperand(Src).addImm(M);
02104     break;
02105   }
02106   case X86::SHUFPDrri: {
02107     assert(MI->getNumOperands() == 4 && "Unknown shufpd instruction!");
02108     if (!Subtarget.hasSSE2()) return nullptr;
02109 
02110     unsigned B = MI->getOperand(1).getReg();
02111     unsigned C = MI->getOperand(2).getReg();
02112     if (B != C) return nullptr;
02113     unsigned M = MI->getOperand(3).getImm();
02114 
02115     // Convert to PSHUFD mask.
02116     M = ((M & 1) << 1) | ((M & 1) << 3) | ((M & 2) << 4) | ((M & 2) << 6)| 0x44;
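    // Worked example (a sanity check of the formula, not from a test): for
    // M = 1, SHUFPD selects the doubles [B[1], B[0]]; the formula gives
    // (1<<1) | (1<<3) | 0x44 = 0x4E, i.e. PSHUFD dword selectors [2,3,0,1],
    // which is the same permutation.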
02117 
02118     NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
02119       .addOperand(Dest).addOperand(Src).addImm(M);
02120     break;
02121   }
02122   case X86::SHL64ri: {
02123     assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
02124     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
02125     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
02126 
02127     // LEA can't handle RSP.
02128     if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
02129         !MF.getRegInfo().constrainRegClass(Src.getReg(),
02130                                            &X86::GR64_NOSPRegClass))
02131       return nullptr;
02132 
02133     NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
02134       .addOperand(Dest)
02135       .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
02136     break;
02137   }
02138   case X86::SHL32ri: {
02139     assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
02140     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
02141     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
02142 
02143     unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
02144 
02145     // LEA can't handle ESP.
02146     bool isKill, isUndef;
02147     unsigned SrcReg;
02148     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
02149     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
02150                         SrcReg, isKill, isUndef, ImplicitOp))
02151       return nullptr;
02152 
02153     MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
02154       .addOperand(Dest)
02155       .addReg(0).addImm(1 << ShAmt)
02156       .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
02157       .addImm(0).addReg(0);
02158     if (ImplicitOp.getReg() != 0)
02159       MIB.addOperand(ImplicitOp);
02160     NewMI = MIB;
02161 
02162     break;
02163   }
02164   case X86::SHL16ri: {
02165     assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
02166     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
02167     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
02168 
02169     if (DisableLEA16)
02170       return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr;
02171     NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
02172       .addOperand(Dest)
02173       .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
02174     break;
02175   }
02176   default: {
02177 
02178     switch (MIOpc) {
02179     default: return nullptr;
02180     case X86::INC64r:
02181     case X86::INC32r:
02182     case X86::INC64_32r: {
02183       assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
02184       unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
02185         : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
02186       bool isKill, isUndef;
02187       unsigned SrcReg;
02188       MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
02189       if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
02190                           SrcReg, isKill, isUndef, ImplicitOp))
02191         return nullptr;
02192 
02193       MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
02194           .addOperand(Dest)
02195           .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
02196       if (ImplicitOp.getReg() != 0)
02197         MIB.addOperand(ImplicitOp);
02198 
02199       NewMI = addOffset(MIB, 1);
02200       break;
02201     }
02202     case X86::INC16r:
02203     case X86::INC64_16r:
02204       if (DisableLEA16)
02205         return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
02206                        : nullptr;
02207       assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
02208       NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
02209                         .addOperand(Dest).addOperand(Src), 1);
02210       break;
02211     case X86::DEC64r:
02212     case X86::DEC32r:
02213     case X86::DEC64_32r: {
02214       assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
02215       unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
02216         : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
02217 
02218       bool isKill, isUndef;
02219       unsigned SrcReg;
02220       MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
02221       if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
02222                           SrcReg, isKill, isUndef, ImplicitOp))
02223         return nullptr;
02224 
02225       MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
02226           .addOperand(Dest)
02227           .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
02228       if (ImplicitOp.getReg() != 0)
02229         MIB.addOperand(ImplicitOp);
02230 
02231       NewMI = addOffset(MIB, -1);
02232 
02233       break;
02234     }
02235     case X86::DEC16r:
02236     case X86::DEC64_16r:
02237       if (DisableLEA16)
02238         return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
02239                        : nullptr;
02240       assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
02241       NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
02242                         .addOperand(Dest).addOperand(Src), -1);
02243       break;
02244     case X86::ADD64rr:
02245     case X86::ADD64rr_DB:
02246     case X86::ADD32rr:
02247     case X86::ADD32rr_DB: {
02248       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
02249       unsigned Opc;
02250       if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
02251         Opc = X86::LEA64r;
02252       else
02253         Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
02254 
02255       bool isKill, isUndef;
02256       unsigned SrcReg;
02257       MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
02258       if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
02259                           SrcReg, isKill, isUndef, ImplicitOp))
02260         return nullptr;
02261 
02262       const MachineOperand &Src2 = MI->getOperand(2);
02263       bool isKill2, isUndef2;
02264       unsigned SrcReg2;
02265       MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
02266       if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
02267                           SrcReg2, isKill2, isUndef2, ImplicitOp2))
02268         return nullptr;
02269 
02270       MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
02271         .addOperand(Dest);
02272       if (ImplicitOp.getReg() != 0)
02273         MIB.addOperand(ImplicitOp);
02274       if (ImplicitOp2.getReg() != 0)
02275         MIB.addOperand(ImplicitOp2);
02276 
02277       NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
02278 
02279       // Preserve undefness of the operands.
02280       NewMI->getOperand(1).setIsUndef(isUndef);
02281       NewMI->getOperand(3).setIsUndef(isUndef2);
02282 
02283       if (LV && Src2.isKill())
02284         LV->replaceKillInstruction(SrcReg2, MI, NewMI);
02285       break;
02286     }
02287     case X86::ADD16rr:
02288     case X86::ADD16rr_DB: {
02289       if (DisableLEA16)
02290         return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
02291                        : nullptr;
02292       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
02293       unsigned Src2 = MI->getOperand(2).getReg();
02294       bool isKill2 = MI->getOperand(2).isKill();
02295       NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
02296                         .addOperand(Dest),
02297                         Src.getReg(), Src.isKill(), Src2, isKill2);
02298 
02299       // Preserve undefness of the operands.
02300       bool isUndef = MI->getOperand(1).isUndef();
02301       bool isUndef2 = MI->getOperand(2).isUndef();
02302       NewMI->getOperand(1).setIsUndef(isUndef);
02303       NewMI->getOperand(3).setIsUndef(isUndef2);
02304 
02305       if (LV && isKill2)
02306         LV->replaceKillInstruction(Src2, MI, NewMI);
02307       break;
02308     }
02309     case X86::ADD64ri32:
02310     case X86::ADD64ri8:
02311     case X86::ADD64ri32_DB:
02312     case X86::ADD64ri8_DB:
02313       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
02314       NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
02315                         .addOperand(Dest).addOperand(Src),
02316                         MI->getOperand(2).getImm());
02317       break;
02318     case X86::ADD32ri:
02319     case X86::ADD32ri8:
02320     case X86::ADD32ri_DB:
02321     case X86::ADD32ri8_DB: {
02322       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
02323       unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
02324 
02325       bool isKill, isUndef;
02326       unsigned SrcReg;
02327       MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
02328       if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
02329                           SrcReg, isKill, isUndef, ImplicitOp))
02330         return nullptr;
02331 
02332       MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
02333           .addOperand(Dest)
02334           .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
02335       if (ImplicitOp.getReg() != 0)
02336         MIB.addOperand(ImplicitOp);
02337 
02338       NewMI = addOffset(MIB, MI->getOperand(2).getImm());
02339       break;
02340     }
02341     case X86::ADD16ri:
02342     case X86::ADD16ri8:
02343     case X86::ADD16ri_DB:
02344     case X86::ADD16ri8_DB:
02345       if (DisableLEA16)
02346         return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
02347                        : nullptr;
02348       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
02349       NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
02350                         .addOperand(Dest).addOperand(Src),
02351                         MI->getOperand(2).getImm());
02352       break;
02353     }
02354   }
02355   }
02356 
02357   if (!NewMI) return nullptr;
02358 
02359   if (LV) {  // Update live variables
02360     if (Src.isKill())
02361       LV->replaceKillInstruction(Src.getReg(), MI, NewMI);
02362     if (Dest.isDead())
02363       LV->replaceKillInstruction(Dest.getReg(), MI, NewMI);
02364   }
02365 
02366   MFI->insert(MBBI, NewMI);          // Insert the new inst
02367   return NewMI;
02368 }
02369 
02370 /// commuteInstruction - A few instructions need special handling (beyond the
02371 /// default operand swap) in order to be commuted.
02372 ///
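/// For example (illustrative operands): commuting
///   %a = CMOVB32rr %b, %c
/// swaps the two sources, so the condition must be inverted as well:
///   %a = CMOVAE32rr %c, %b
/// selects the same value. The SHLD/SHRD cases likewise swap the opcode and
/// rewrite the shift amount as (size - amount).
///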
02373 MachineInstr *
02374 X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
02375   switch (MI->getOpcode()) {
02376   case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
02377   case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
02378   case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
02379   case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
02380   case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
02381   case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
02382     unsigned Opc;
02383     unsigned Size;
02384     switch (MI->getOpcode()) {
02385     default: llvm_unreachable("Unreachable!");
02386     case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
02387     case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
02388     case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
02389     case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
02390     case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
02391     case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
02392     }
02393     unsigned Amt = MI->getOperand(3).getImm();
02394     if (NewMI) {
02395       MachineFunction &MF = *MI->getParent()->getParent();
02396       MI = MF.CloneMachineInstr(MI);
02397       NewMI = false;
02398     }
02399     MI->setDesc(get(Opc));
02400     MI->getOperand(3).setImm(Size-Amt);
02401     return TargetInstrInfo::commuteInstruction(MI, NewMI);
02402   }
02403   case X86::CMOVB16rr:  case X86::CMOVB32rr:  case X86::CMOVB64rr:
02404   case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
02405   case X86::CMOVE16rr:  case X86::CMOVE32rr:  case X86::CMOVE64rr:
02406   case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
02407   case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr:
02408   case X86::CMOVA16rr:  case X86::CMOVA32rr:  case X86::CMOVA64rr:
02409   case X86::CMOVL16rr:  case X86::CMOVL32rr:  case X86::CMOVL64rr:
02410   case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
02411   case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
02412   case X86::CMOVG16rr:  case X86::CMOVG32rr:  case X86::CMOVG64rr:
02413   case X86::CMOVS16rr:  case X86::CMOVS32rr:  case X86::CMOVS64rr:
02414   case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
02415   case X86::CMOVP16rr:  case X86::CMOVP32rr:  case X86::CMOVP64rr:
02416   case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
02417   case X86::CMOVO16rr:  case X86::CMOVO32rr:  case X86::CMOVO64rr:
02418   case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: {
02419     unsigned Opc;
02420     switch (MI->getOpcode()) {
02421     default: llvm_unreachable("Unreachable!");
02422     case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
02423     case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
02424     case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
02425     case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
02426     case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
02427     case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
02428     case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
02429     case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
02430     case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
02431     case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
02432     case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
02433     case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
02434     case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
02435     case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
02436     case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
02437     case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
02438     case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
02439     case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
02440     case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
02441     case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
02442     case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
02443     case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
02444     case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
02445     case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
02446     case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
02447     case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
02448     case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
02449     case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
02450     case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
02451     case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
02452     case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
02453     case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
02454     case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
02455     case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
02456     case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
02457     case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
02458     case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
02459     case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
02460     case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
02461     case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
02462     case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
02463     case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
02464     case X86::CMOVO16rr:  Opc = X86::CMOVNO16rr; break;
02465     case X86::CMOVO32rr:  Opc = X86::CMOVNO32rr; break;
02466     case X86::CMOVO64rr:  Opc = X86::CMOVNO64rr; break;
02467     case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
02468     case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
02469     case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
02470     }
02471     if (NewMI) {
02472       MachineFunction &MF = *MI->getParent()->getParent();
02473       MI = MF.CloneMachineInstr(MI);
02474       NewMI = false;
02475     }
02476     MI->setDesc(get(Opc));
02477     // Fall through intentionally; the default case performs the actual commute.
02478   }
02479   default:
02480     return TargetInstrInfo::commuteInstruction(MI, NewMI);
02481   }
02482 }
02483 
02484 bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
02485                                          unsigned &SrcOpIdx2) const {
02486   switch (MI->getOpcode()) {
02487     case X86::VFMADDPDr231r:
02488     case X86::VFMADDPSr231r:
02489     case X86::VFMADDSDr231r:
02490     case X86::VFMADDSSr231r:
02491     case X86::VFMSUBPDr231r:
02492     case X86::VFMSUBPSr231r:
02493     case X86::VFMSUBSDr231r:
02494     case X86::VFMSUBSSr231r:
02495     case X86::VFNMADDPDr231r:
02496     case X86::VFNMADDPSr231r:
02497     case X86::VFNMADDSDr231r:
02498     case X86::VFNMADDSSr231r:
02499     case X86::VFNMSUBPDr231r:
02500     case X86::VFNMSUBPSr231r:
02501     case X86::VFNMSUBSDr231r:
02502     case X86::VFNMSUBSSr231r:
02503     case X86::VFMADDPDr231rY:
02504     case X86::VFMADDPSr231rY:
02505     case X86::VFMSUBPDr231rY:
02506     case X86::VFMSUBPSr231rY:
02507     case X86::VFNMADDPDr231rY:
02508     case X86::VFNMADDPSr231rY:
02509     case X86::VFNMSUBPDr231rY:
02510     case X86::VFNMSUBPSr231rY:
02511       SrcOpIdx1 = 2;
02512       SrcOpIdx2 = 3;
02513       return true;
02514     default:
02515       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
02516   }
02517 }
02518 
02519 static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) {
02520   switch (BrOpc) {
02521   default: return X86::COND_INVALID;
02522   case X86::JE_4:  return X86::COND_E;
02523   case X86::JNE_4: return X86::COND_NE;
02524   case X86::JL_4:  return X86::COND_L;
02525   case X86::JLE_4: return X86::COND_LE;
02526   case X86::JG_4:  return X86::COND_G;
02527   case X86::JGE_4: return X86::COND_GE;
02528   case X86::JB_4:  return X86::COND_B;
02529   case X86::JBE_4: return X86::COND_BE;
02530   case X86::JA_4:  return X86::COND_A;
02531   case X86::JAE_4: return X86::COND_AE;
02532   case X86::JS_4:  return X86::COND_S;
02533   case X86::JNS_4: return X86::COND_NS;
02534   case X86::JP_4:  return X86::COND_P;
02535   case X86::JNP_4: return X86::COND_NP;
02536   case X86::JO_4:  return X86::COND_O;
02537   case X86::JNO_4: return X86::COND_NO;
02538   }
02539 }
02540 
02541 /// getCondFromSETOpc - Return the condition code of a SET opcode.
02542 static X86::CondCode getCondFromSETOpc(unsigned Opc) {
02543   switch (Opc) {
02544   default: return X86::COND_INVALID;
02545   case X86::SETAr:  case X86::SETAm:  return X86::COND_A;
02546   case X86::SETAEr: case X86::SETAEm: return X86::COND_AE;
02547   case X86::SETBr:  case X86::SETBm:  return X86::COND_B;
02548   case X86::SETBEr: case X86::SETBEm: return X86::COND_BE;
02549   case X86::SETEr:  case X86::SETEm:  return X86::COND_E;
02550   case X86::SETGr:  case X86::SETGm:  return X86::COND_G;
02551   case X86::SETGEr: case X86::SETGEm: return X86::COND_GE;
02552   case X86::SETLr:  case X86::SETLm:  return X86::COND_L;
02553   case X86::SETLEr: case X86::SETLEm: return X86::COND_LE;
02554   case X86::SETNEr: case X86::SETNEm: return X86::COND_NE;
02555   case X86::SETNOr: case X86::SETNOm: return X86::COND_NO;
02556   case X86::SETNPr: case X86::SETNPm: return X86::COND_NP;
02557   case X86::SETNSr: case X86::SETNSm: return X86::COND_NS;
02558   case X86::SETOr:  case X86::SETOm:  return X86::COND_O;
02559   case X86::SETPr:  case X86::SETPm:  return X86::COND_P;
02560   case X86::SETSr:  case X86::SETSm:  return X86::COND_S;
02561   }
02562 }
02563 
02564 /// getCondFromCMovOpc - Return the condition code of a CMOV opcode.
02565 X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {
02566   switch (Opc) {
02567   default: return X86::COND_INVALID;
02568   case X86::CMOVA16rm:  case X86::CMOVA16rr:  case X86::CMOVA32rm:
02569   case X86::CMOVA32rr:  case X86::CMOVA64rm:  case X86::CMOVA64rr:
02570     return X86::COND_A;
02571   case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm:
02572   case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr:
02573     return X86::COND_AE;
02574   case X86::CMOVB16rm:  case X86::CMOVB16rr:  case X86::CMOVB32rm:
02575   case X86::CMOVB32rr:  case X86::CMOVB64rm:  case X86::CMOVB64rr:
02576     return X86::COND_B;
02577   case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm:
02578   case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr:
02579     return X86::COND_BE;
02580   case X86::CMOVE16rm:  case X86::CMOVE16rr:  case X86::CMOVE32rm:
02581   case X86::CMOVE32rr:  case X86::CMOVE64rm:  case X86::CMOVE64rr:
02582     return X86::COND_E;
02583   case X86::CMOVG16rm:  case X86::CMOVG16rr:  case X86::CMOVG32rm:
02584   case X86::CMOVG32rr:  case X86::CMOVG64rm:  case X86::CMOVG64rr:
02585     return X86::COND_G;
02586   case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm:
02587   case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr:
02588     return X86::COND_GE;
02589   case X86::CMOVL16rm:  case X86::CMOVL16rr:  case X86::CMOVL32rm:
02590   case X86::CMOVL32rr:  case X86::CMOVL64rm:  case X86::CMOVL64rr:
02591     return X86::COND_L;
02592   case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm:
02593   case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr:
02594     return X86::COND_LE;
02595   case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm:
02596   case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr:
02597     return X86::COND_NE;
02598   case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm:
02599   case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr:
02600     return X86::COND_NO;
02601   case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm:
02602   case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr:
02603     return X86::COND_NP;
02604   case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm:
02605   case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr:
02606     return X86::COND_NS;
02607   case X86::CMOVO16rm:  case X86::CMOVO16rr:  case X86::CMOVO32rm:
02608   case X86::CMOVO32rr:  case X86::CMOVO64rm:  case X86::CMOVO64rr:
02609     return X86::COND_O;
02610   case X86::CMOVP16rm:  case X86::CMOVP16rr:  case X86::CMOVP32rm:
02611   case X86::CMOVP32rr:  case X86::CMOVP64rm:  case X86::CMOVP64rr:
02612     return X86::COND_P;
02613   case X86::CMOVS16rm:  case X86::CMOVS16rr:  case X86::CMOVS32rm:
02614   case X86::CMOVS32rr:  case X86::CMOVS64rm:  case X86::CMOVS64rr:
02615     return X86::COND_S;
02616   }
02617 }
02618 
02619 unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
02620   switch (CC) {
02621   default: llvm_unreachable("Illegal condition code!");
02622   case X86::COND_E:  return X86::JE_4;
02623   case X86::COND_NE: return X86::JNE_4;
02624   case X86::COND_L:  return X86::JL_4;
02625   case X86::COND_LE: return X86::JLE_4;
02626   case X86::COND_G:  return X86::JG_4;
02627   case X86::COND_GE: return X86::JGE_4;
02628   case X86::COND_B:  return X86::JB_4;
02629   case X86::COND_BE: return X86::JBE_4;
02630   case X86::COND_A:  return X86::JA_4;
02631   case X86::COND_AE: return X86::JAE_4;
02632   case X86::COND_S:  return X86::JS_4;
02633   case X86::COND_NS: return X86::JNS_4;
02634   case X86::COND_P:  return X86::JP_4;
02635   case X86::COND_NP: return X86::JNP_4;
02636   case X86::COND_O:  return X86::JO_4;
02637   case X86::COND_NO: return X86::JNO_4;
02638   }
02639 }
02640 
02641 /// GetOppositeBranchCondition - Return the inverse of the specified condition,
02642 /// e.g. turning COND_E to COND_NE.
02643 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
02644   switch (CC) {
02645   default: llvm_unreachable("Illegal condition code!");
02646   case X86::COND_E:  return X86::COND_NE;
02647   case X86::COND_NE: return X86::COND_E;
02648   case X86::COND_L:  return X86::COND_GE;
02649   case X86::COND_LE: return X86::COND_G;
02650   case X86::COND_G:  return X86::COND_LE;
02651   case X86::COND_GE: return X86::COND_L;
02652   case X86::COND_B:  return X86::COND_AE;
02653   case X86::COND_BE: return X86::COND_A;
02654   case X86::COND_A:  return X86::COND_BE;
02655   case X86::COND_AE: return X86::COND_B;
02656   case X86::COND_S:  return X86::COND_NS;
02657   case X86::COND_NS: return X86::COND_S;
02658   case X86::COND_P:  return X86::COND_NP;
02659   case X86::COND_NP: return X86::COND_P;
02660   case X86::COND_O:  return X86::COND_NO;
02661   case X86::COND_NO: return X86::COND_O;
02662   }
02663 }
02664 
02665 /// getSwappedCondition - Assume the flags are set by MI(a,b); return
02666 /// the condition code to use if we modify the instructions such that the
02667 /// flags are instead set by MI(b,a).
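/// For example, if the flags come from CMP a, b and the compare is rewritten
/// as CMP b, a, a COND_L user must become COND_G (the unsigned conditions
/// swap analogously), while COND_E and COND_NE are unchanged.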
02668 static X86::CondCode getSwappedCondition(X86::CondCode CC) {
02669   switch (CC) {
02670   default: return X86::COND_INVALID;
02671   case X86::COND_E:  return X86::COND_E;
02672   case X86::COND_NE: return X86::COND_NE;
02673   case X86::COND_L:  return X86::COND_G;
02674   case X86::COND_LE: return X86::COND_GE;
02675   case X86::COND_G:  return X86::COND_L;
02676   case X86::COND_GE: return X86::COND_LE;
02677   case X86::COND_B:  return X86::COND_A;
02678   case X86::COND_BE: return X86::COND_AE;
02679   case X86::COND_A:  return X86::COND_B;
02680   case X86::COND_AE: return X86::COND_BE;
02681   }
02682 }
02683 
02684 /// getSETFromCond - Return a SETcc opcode for the given condition and
02685 /// whether it has a memory operand.
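/// For example, getSETFromCond(X86::COND_E, /*HasMemoryOperand=*/false) yields
/// X86::SETEr and the memory form yields X86::SETEm, assuming (as the table
/// below does) that the CondCode enumeration matches the table's row order.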
02686 unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
02687   static const uint16_t Opc[16][2] = {
02688     { X86::SETAr,  X86::SETAm  },
02689     { X86::SETAEr, X86::SETAEm },
02690     { X86::SETBr,  X86::SETBm  },
02691     { X86::SETBEr, X86::SETBEm },
02692     { X86::SETEr,  X86::SETEm  },
02693     { X86::SETGr,  X86::SETGm  },
02694     { X86::SETGEr, X86::SETGEm },
02695     { X86::SETLr,  X86::SETLm  },
02696     { X86::SETLEr, X86::SETLEm },
02697     { X86::SETNEr, X86::SETNEm },
02698     { X86::SETNOr, X86::SETNOm },
02699     { X86::SETNPr, X86::SETNPm },
02700     { X86::SETNSr, X86::SETNSm },
02701     { X86::SETOr,  X86::SETOm  },
02702     { X86::SETPr,  X86::SETPm  },
02703     { X86::SETSr,  X86::SETSm  }
02704   };
02705 
02706   assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes");
02707   return Opc[CC][HasMemoryOperand ? 1 : 0];
02708 }
02709 
02710 /// getCMovFromCond - Return a cmov opcode for the given condition,
02711 /// register size in bytes, and operand type.
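/// For example, getCMovFromCond(X86::COND_B, 4, /*HasMemoryOperand=*/false)
/// yields X86::CMOVB32rr; RegBytes == 8 selects CMOVB64rr, and a memory
/// operand selects the corresponding rm form.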
02712 unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
02713                               bool HasMemoryOperand) {
02714   static const uint16_t Opc[32][3] = {
02715     { X86::CMOVA16rr,  X86::CMOVA32rr,  X86::CMOVA64rr  },
02716     { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
02717     { X86::CMOVB16rr,  X86::CMOVB32rr,  X86::CMOVB64rr  },
02718     { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr },
02719     { X86::CMOVE16rr,  X86::CMOVE32rr,  X86::CMOVE64rr  },
02720     { X86::CMOVG16rr,  X86::CMOVG32rr,  X86::CMOVG64rr  },
02721     { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr },
02722     { X86::CMOVL16rr,  X86::CMOVL32rr,  X86::CMOVL64rr  },
02723     { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr },
02724     { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr },
02725     { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr },
02726     { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr },
02727     { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr },
02728     { X86::CMOVO16rr,  X86::CMOVO32rr,  X86::CMOVO64rr  },
02729     { X86::CMOVP16rr,  X86::CMOVP32rr,  X86::CMOVP64rr  },
02730     { X86::CMOVS16rr,  X86::CMOVS32rr,  X86::CMOVS64rr  },
02731     { X86::CMOVA16rm,  X86::CMOVA32rm,  X86::CMOVA64rm  },
02732     { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm },
02733     { X86::CMOVB16rm,  X86::CMOVB32rm,  X86::CMOVB64rm  },
02734     { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm },
02735     { X86::CMOVE16rm,  X86::CMOVE32rm,  X86::CMOVE64rm  },
02736     { X86::CMOVG16rm,  X86::CMOVG32rm,  X86::CMOVG64rm  },
02737     { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm },
02738     { X86::CMOVL16rm,  X86::CMOVL32rm,  X86::CMOVL64rm  },
02739     { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm },
02740     { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm },
02741     { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm },
02742     { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm },
02743     { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm },
02744     { X86::CMOVO16rm,  X86::CMOVO32rm,  X86::CMOVO64rm  },
02745     { X86::CMOVP16rm,  X86::CMOVP32rm,  X86::CMOVP64rm  },
02746     { X86::CMOVS16rm,  X86::CMOVS32rm,  X86::CMOVS64rm  }
02747   };
02748 
02749   assert(CC < 16 && "Can only handle standard cond codes");
02750   unsigned Idx = HasMemoryOperand ? 16+CC : CC;
02751   switch(RegBytes) {
02752   default: llvm_unreachable("Illegal register size!");
02753   case 2: return Opc[Idx][0];
02754   case 4: return Opc[Idx][1];
02755   case 8: return Opc[Idx][2];
02756   }
02757 }
02758 
02759 bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
02760   if (!MI->isTerminator()) return false;
02761 
02762   // Conditional branch is a special case.
02763   if (MI->isBranch() && !MI->isBarrier())
02764     return true;
02765   if (!MI->isPredicable())
02766     return true;
02767   return !isPredicated(MI);
02768 }
02769 
02770 bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
02771                                  MachineBasicBlock *&TBB,
02772                                  MachineBasicBlock *&FBB,
02773                                  SmallVectorImpl<MachineOperand> &Cond,
02774                                  bool AllowModify) const {
02775   // Start from the bottom of the block and work up, examining the
02776   // terminator instructions.
02777   MachineBasicBlock::iterator I = MBB.end();
02778   MachineBasicBlock::iterator UnCondBrIter = MBB.end();
02779   while (I != MBB.begin()) {
02780     --I;
02781     if (I->isDebugValue())
02782       continue;
02783 
02784     // Working from the bottom, when we see a non-terminator instruction, we're
02785     // done.
02786     if (!isUnpredicatedTerminator(I))
02787       break;
02788 
02789     // A terminator that isn't a branch can't easily be handled by this
02790     // analysis.
02791     if (!I->isBranch())
02792       return true;
02793 
02794     // Handle unconditional branches.
02795     if (I->getOpcode() == X86::JMP_4) {
02796       UnCondBrIter = I;
02797 
02798       if (!AllowModify) {
02799         TBB = I->getOperand(0).getMBB();
02800         continue;
02801       }
02802 
02803       // If the block has any instructions after a JMP, delete them.
02804       while (std::next(I) != MBB.end())
02805         std::next(I)->eraseFromParent();
02806 
02807       Cond.clear();
02808       FBB = nullptr;
02809 
02810       // Delete the JMP if it's equivalent to a fall-through.
02811       if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
02812         TBB = nullptr;
02813         I->eraseFromParent();
02814         I = MBB.end();
02815         UnCondBrIter = MBB.end();
02816         continue;
02817       }
02818 
02819       // TBB is used to indicate the unconditional destination.
02820       TBB = I->getOperand(0).getMBB();
02821       continue;
02822     }
02823 
02824     // Handle conditional branches.
02825     X86::CondCode BranchCode = getCondFromBranchOpc(I->getOpcode());
02826     if (BranchCode == X86::COND_INVALID)
02827       return true;  // Can't handle indirect branch.
02828 
02829     // Working from the bottom, handle the first conditional branch.
02830     if (Cond.empty()) {
02831       MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
02832       if (AllowModify && UnCondBrIter != MBB.end() &&
02833           MBB.isLayoutSuccessor(TargetBB)) {
02834         // If we can modify the code and it ends in something like:
02835         //
02836         //     jCC L1
02837         //     jmp L2
02838         //   L1:
02839         //     ...
02840         //   L2:
02841         //
02842         // Then we can change this to:
02843         //
02844         //     jnCC L2
02845         //   L1:
02846         //     ...
02847         //   L2:
02848         //
02849         // Which is a bit more efficient.
02850         // We conditionally jump to the fall-through block.
02851         BranchCode = GetOppositeBranchCondition(BranchCode);
02852         unsigned JNCC = GetCondBranchFromCond(BranchCode);
02853         MachineBasicBlock::iterator OldInst = I;
02854 
02855         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
02856           .addMBB(UnCondBrIter->getOperand(0).getMBB());
02857         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
02858           .addMBB(TargetBB);
02859 
02860         OldInst->eraseFromParent();
02861         UnCondBrIter->eraseFromParent();
02862 
02863         // Restart the analysis.
02864         UnCondBrIter = MBB.end();
02865         I = MBB.end();
02866         continue;
02867       }
02868 
02869       FBB = TBB;
02870       TBB = I->getOperand(0).getMBB();
02871       Cond.push_back(MachineOperand::CreateImm(BranchCode));
02872       continue;
02873     }
02874 
02875     // Handle subsequent conditional branches. Only handle the case where all
02876     // conditional branches branch to the same destination and their condition
02877     // opcodes fit one of the special multi-branch idioms.
02878     assert(Cond.size() == 1);
02879     assert(TBB);
02880 
02881     // Only handle the case where all conditional branches branch to the same
02882     // destination.
02883     if (TBB != I->getOperand(0).getMBB())
02884       return true;
02885 
02886     // If the conditions are the same, we can leave them alone.
02887     X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
02888     if (OldBranchCode == BranchCode)
02889       continue;
02890 
02891     // If they differ, see if they fit one of the known patterns. Theoretically,
02892     // we could handle more patterns here, but we shouldn't expect to see them
02893     // if instruction selection has done a reasonable job.
02894     if ((OldBranchCode == X86::COND_NP &&
02895          BranchCode == X86::COND_E) ||
02896         (OldBranchCode == X86::COND_E &&
02897          BranchCode == X86::COND_NP))
02898       BranchCode = X86::COND_NP_OR_E;
02899     else if ((OldBranchCode == X86::COND_P &&
02900               BranchCode == X86::COND_NE) ||
02901              (OldBranchCode == X86::COND_NE &&
02902               BranchCode == X86::COND_P))
02903       BranchCode = X86::COND_NE_OR_P;
02904     else
02905       return true;
02906 
02907     // Update the MachineOperand.
02908     Cond[0].setImm(BranchCode);
02909   }
02910 
02911   return false;
02912 }
02913 
02914 unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
02915   MachineBasicBlock::iterator I = MBB.end();
02916   unsigned Count = 0;
02917 
02918   while (I != MBB.begin()) {
02919     --I;
02920     if (I->isDebugValue())
02921       continue;
02922     if (I->getOpcode() != X86::JMP_4 &&
02923         getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
02924       break;
02925     // Remove the branch.
02926     I->eraseFromParent();
02927     I = MBB.end();
02928     ++Count;
02929   }
02930 
02931   return Count;
02932 }
02933 
02934 unsigned
02935 X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
02936                            MachineBasicBlock *FBB,
02937                            const SmallVectorImpl<MachineOperand> &Cond,
02938                            DebugLoc DL) const {
02939   // Shouldn't be a fall through.
02940   assert(TBB && "InsertBranch must not be told to insert a fallthrough");
02941   assert((Cond.size() == 1 || Cond.size() == 0) &&
02942          "X86 branch conditions have one component!");
02943 
02944   if (Cond.empty()) {
02945     // Unconditional branch?
02946     assert(!FBB && "Unconditional branch with multiple successors!");
02947     BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
02948     return 1;
02949   }
02950 
02951   // Conditional branch.
02952   unsigned Count = 0;
02953   X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
02954   switch (CC) {
02955   case X86::COND_NP_OR_E:
02956     // Synthesize NP_OR_E with two branches.
02957     BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
02958     ++Count;
02959     BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
02960     ++Count;
02961     break;
02962   case X86::COND_NE_OR_P:
02963     // Synthesize NE_OR_P with two branches.
02964     BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
02965     ++Count;
02966     BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
02967     ++Count;
02968     break;
02969   default: {
02970     unsigned Opc = GetCondBranchFromCond(CC);
02971     BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
02972     ++Count;
02973   }
02974   }
02975   if (FBB) {
02976     // Two-way Conditional branch. Insert the second branch.
02977     BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
02978     ++Count;
02979   }
02980   return Count;
02981 }
02982 
02983 bool X86InstrInfo::
02984 canInsertSelect(const MachineBasicBlock &MBB,
02985                 const SmallVectorImpl<MachineOperand> &Cond,
02986                 unsigned TrueReg, unsigned FalseReg,
02987                 int &CondCycles, int &TrueCycles, int &FalseCycles) const {
02988   // Not all subtargets have cmov instructions.
02989   if (!Subtarget.hasCMov())
02990     return false;
02991   if (Cond.size() != 1)
02992     return false;
02993   // We cannot do the composite conditions, at least not in SSA form.
02994   if ((X86::CondCode)Cond[0].getImm() > X86::COND_S)
02995     return false;
02996 
02997   // Check register classes.
02998   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
02999   const TargetRegisterClass *RC =
03000     RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
03001   if (!RC)
03002     return false;
03003 
03004   // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
03005   if (X86::GR16RegClass.hasSubClassEq(RC) ||
03006       X86::GR32RegClass.hasSubClassEq(RC) ||
03007       X86::GR64RegClass.hasSubClassEq(RC)) {
03008     // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
03009     // Bridge. Probably Ivy Bridge as well.
03010     CondCycles = 2;
03011     TrueCycles = 2;
03012     FalseCycles = 2;
03013     return true;
03014   }
03015 
03016   // Can't do vectors.
03017   return false;
03018 }
03019 
03020 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
03021                                 MachineBasicBlock::iterator I, DebugLoc DL,
03022                                 unsigned DstReg,
03023                                 const SmallVectorImpl<MachineOperand> &Cond,
03024                                 unsigned TrueReg, unsigned FalseReg) const {
03025    MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
03026    assert(Cond.size() == 1 && "Invalid Cond array");
03027    unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
03028                                   MRI.getRegClass(DstReg)->getSize(),
03029                                   false/*HasMemoryOperand*/);
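   // Operand order note: the cmov's tied first source is the value kept when
   // the condition is false, so FalseReg goes first and TrueReg second,
   // giving DstReg = Cond ? TrueReg : FalseReg.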
03030    BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
03031 }
03032 
03033 /// isHReg - Test if the given register is a physical h register.
03034 static bool isHReg(unsigned Reg) {
03035   return X86::GR8_ABCD_HRegClass.contains(Reg);
03036 }
03037 
03038 // Try to copy between VR128/VR64 and GR64 registers.
03039 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
03040                                         const X86Subtarget &Subtarget) {
03041 
03042   // SrcReg(VR128) -> DestReg(GR64)
03043   // SrcReg(VR64)  -> DestReg(GR64)
03044   // SrcReg(GR64)  -> DestReg(VR128)
03045   // SrcReg(GR64)  -> DestReg(VR64)
03046 
03047   bool HasAVX = Subtarget.hasAVX();
03048   bool HasAVX512 = Subtarget.hasAVX512();
03049   if (X86::GR64RegClass.contains(DestReg)) {
03050     if (X86::VR128XRegClass.contains(SrcReg))
03051       // Copy from a VR128 register to a GR64 register.
03052       return HasAVX512 ? X86::VMOVPQIto64Zrr: (HasAVX ? X86::VMOVPQIto64rr :
03053                                                X86::MOVPQIto64rr);
03054     if (X86::VR64RegClass.contains(SrcReg))
03055       // Copy from a VR64 register to a GR64 register.
03056       return X86::MOVSDto64rr;
03057   } else if (X86::GR64RegClass.contains(SrcReg)) {
03058     // Copy from a GR64 register to a VR128 register.
03059     if (X86::VR128XRegClass.contains(DestReg))
03060       return HasAVX512 ? X86::VMOV64toPQIZrr: (HasAVX ? X86::VMOV64toPQIrr :
03061                                                X86::MOV64toPQIrr);
03062     // Copy from a GR64 register to a VR64 register.
03063     if (X86::VR64RegClass.contains(DestReg))
03064       return X86::MOV64toSDrr;
03065   }
03066 
03067   // SrcReg(FR32) -> DestReg(GR32)
03068   // SrcReg(GR32) -> DestReg(FR32)
03069 
03070   if (X86::GR32RegClass.contains(DestReg) && X86::FR32XRegClass.contains(SrcReg))
03071     // Copy from a FR32 register to a GR32 register.
03072     return HasAVX512 ? X86::VMOVSS2DIZrr : (HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr);
03073 
03074   if (X86::FR32XRegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
03075     // Copy from a GR32 register to a FR32 register.
03076     return HasAVX512 ? X86::VMOVDI2SSZrr : (HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr);
03077   return 0;
03078 }
03079 
03080 inline static bool MaskRegClassContains(unsigned Reg) {
03081   return X86::VK8RegClass.contains(Reg) ||
03082          X86::VK16RegClass.contains(Reg) ||
03083          X86::VK32RegClass.contains(Reg) ||
03084          X86::VK64RegClass.contains(Reg) ||
03085          X86::VK1RegClass.contains(Reg);
03086 }
03087 static
03088 unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg) {
03089   if (X86::VR128XRegClass.contains(DestReg, SrcReg) ||
03090       X86::VR256XRegClass.contains(DestReg, SrcReg) ||
03091       X86::VR512RegClass.contains(DestReg, SrcReg)) {
03092      DestReg = get512BitSuperRegister(DestReg);
03093      SrcReg = get512BitSuperRegister(SrcReg);
03094      return X86::VMOVAPSZrr;
03095   }
03096   if (MaskRegClassContains(DestReg) &&
03097       MaskRegClassContains(SrcReg))
03098     return X86::KMOVWkk;
03099   if (MaskRegClassContains(DestReg) &&
03100       (X86::GR32RegClass.contains(SrcReg) ||
03101        X86::GR16RegClass.contains(SrcReg) ||
03102        X86::GR8RegClass.contains(SrcReg))) {
03103     SrcReg = getX86SubSuperRegister(SrcReg, MVT::i32);
03104     return X86::KMOVWkr;
03105   }
03106   if ((X86::GR32RegClass.contains(DestReg) ||
03107        X86::GR16RegClass.contains(DestReg) ||
03108        X86::GR8RegClass.contains(DestReg)) &&
03109        MaskRegClassContains(SrcReg)) {
03110     DestReg = getX86SubSuperRegister(DestReg, MVT::i32);
03111     return X86::KMOVWrk;
03112   }
03113   return 0;
03114 }
03115 
03116 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
03117                                MachineBasicBlock::iterator MI, DebugLoc DL,
03118                                unsigned DestReg, unsigned SrcReg,
03119                                bool KillSrc) const {
03120   // First deal with the normal symmetric copies.
03121   bool HasAVX = Subtarget.hasAVX();
03122   bool HasAVX512 = Subtarget.hasAVX512();
03123   unsigned Opc = 0;
03124   if (X86::GR64RegClass.contains(DestReg, SrcReg))
03125     Opc = X86::MOV64rr;
03126   else if (X86::GR32RegClass.contains(DestReg, SrcReg))
03127     Opc = X86::MOV32rr;
03128   else if (X86::GR16RegClass.contains(DestReg, SrcReg))
03129     Opc = X86::MOV16rr;
03130   else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
03131     // Copying to or from a physical H register on x86-64 requires a NOREX
03132     // move.  Otherwise use a normal move.
03133     if ((isHReg(DestReg) || isHReg(SrcReg)) &&
03134         Subtarget.is64Bit()) {
03135       Opc = X86::MOV8rr_NOREX;
03136       // Both operands must be encodable without a REX prefix.
03137       assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
03138              "8-bit H register can not be copied outside GR8_NOREX");
03139     } else
03140       Opc = X86::MOV8rr;
03141   }
03142   else if (X86::VR64RegClass.contains(DestReg, SrcReg))
03143     Opc = X86::MMX_MOVQ64rr;
03144   else if (HasAVX512)
03145     Opc = copyPhysRegOpcode_AVX512(DestReg, SrcReg);
03146   else if (X86::VR128RegClass.contains(DestReg, SrcReg))
03147     Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
03148   else if (X86::VR256RegClass.contains(DestReg, SrcReg))
03149     Opc = X86::VMOVAPSYrr;
03150   if (!Opc)
03151     Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
03152 
03153   if (Opc) {
03154     BuildMI(MBB, MI, DL, get(Opc), DestReg)
03155       .addReg(SrcReg, getKillRegState(KillSrc));
03156     return;
03157   }
03158 
03159   // Moving EFLAGS to/from another register requires a push and a pop.
03160   // Notice that we have to adjust the stack if we don't want to clobber the
03161   // first frame index. See X86FrameLowering.cpp - clobbersTheStack.
03162   if (SrcReg == X86::EFLAGS) {
03163     if (X86::GR64RegClass.contains(DestReg)) {
03164       BuildMI(MBB, MI, DL, get(X86::PUSHF64));
03165       BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
03166       return;
03167     }
03168     if (X86::GR32RegClass.contains(DestReg)) {
03169       BuildMI(MBB, MI, DL, get(X86::PUSHF32));
03170       BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
03171       return;
03172     }
03173   }
03174   if (DestReg == X86::EFLAGS) {
03175     if (X86::GR64RegClass.contains(SrcReg)) {
03176       BuildMI(MBB, MI, DL, get(X86::PUSH64r))
03177         .addReg(SrcReg, getKillRegState(KillSrc));
03178       BuildMI(MBB, MI, DL, get(X86::POPF64));
03179       return;
03180     }
03181     if (X86::GR32RegClass.contains(SrcReg)) {
03182       BuildMI(MBB, MI, DL, get(X86::PUSH32r))
03183         .addReg(SrcReg, getKillRegState(KillSrc));
03184       BuildMI(MBB, MI, DL, get(X86::POPF32));
03185       return;
03186     }
03187   }
03188 
03189   DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
03190                << " to " << RI.getName(DestReg) << '\n');
03191   llvm_unreachable("Cannot emit physreg copy instruction");
03192 }
03193 
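// Pick the opcode used to spill (store) or reload (load) a register of class
// RC. As a rough example of the selection below: a 16-byte XMM register with
// an aligned stack slot uses MOVAPSmr / MOVAPSrm (the VMOVAPS forms with AVX),
// while an unaligned slot falls back to the MOVUPS forms.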
03194 static unsigned getLoadStoreRegOpcode(unsigned Reg,
03195                                       const TargetRegisterClass *RC,
03196                                       bool isStackAligned,
03197                                       const X86Subtarget &STI,
03198                                       bool load) {
03199   if (STI.hasAVX512()) {
03200     if (X86::VK8RegClass.hasSubClassEq(RC)  ||
03201       X86::VK16RegClass.hasSubClassEq(RC))
03202       return load ? X86::KMOVWkm : X86::KMOVWmk;
03203     if (RC->getSize() == 4 && X86::FR32XRegClass.hasSubClassEq(RC))
03204       return load ? X86::VMOVSSZrm : X86::VMOVSSZmr;
03205     if (RC->getSize() == 8 && X86::FR64XRegClass.hasSubClassEq(RC))
03206       return load ? X86::VMOVSDZrm : X86::VMOVSDZmr;
03207     if (X86::VR512RegClass.hasSubClassEq(RC))
03208       return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
03209   }
03210 
03211   bool HasAVX = STI.hasAVX();
03212   switch (RC->getSize()) {
03213   default:
03214     llvm_unreachable("Unknown spill size");
03215   case 1:
03216     assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
03217     if (STI.is64Bit())
03218       // Copying to or from a physical H register on x86-64 requires a NOREX
03219       // move.  Otherwise use a normal move.
03220       if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
03221         return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
03222     return load ? X86::MOV8rm : X86::MOV8mr;
03223   case 2:
03224     assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
03225     return load ? X86::MOV16rm : X86::MOV16mr;
03226   case 4:
03227     if (X86::GR32RegClass.hasSubClassEq(RC))
03228       return load ? X86::MOV32rm : X86::MOV32mr;
03229     if (X86::FR32RegClass.hasSubClassEq(RC))
03230       return load ?
03231         (HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) :
03232         (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
03233     if (X86::RFP32RegClass.hasSubClassEq(RC))
03234       return load ? X86::LD_Fp32m : X86::ST_Fp32m;
03235     llvm_unreachable("Unknown 4-byte regclass");
03236   case 8:
03237     if (X86::GR64RegClass.hasSubClassEq(RC))
03238       return load ? X86::MOV64rm : X86::MOV64mr;
03239     if (X86::FR64RegClass.hasSubClassEq(RC))
03240       return load ?
03241         (HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) :
03242         (HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
03243     if (X86::VR64RegClass.hasSubClassEq(RC))
03244       return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
03245     if (X86::RFP64RegClass.hasSubClassEq(RC))
03246       return load ? X86::LD_Fp64m : X86::ST_Fp64m;
03247     llvm_unreachable("Unknown 8-byte regclass");
03248   case 10:
03249     assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
03250     return load ? X86::LD_Fp80m : X86::ST_FpP80m;
03251   case 16: {
03252     assert((X86::VR128RegClass.hasSubClassEq(RC) ||
03253             X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass");
03254     // If the stack is realigned we can use aligned loads and stores.
03255     if (isStackAligned)
03256       return load ?
03257         (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) :
03258         (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
03259     else
03260       return load ?
03261         (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) :
03262         (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
03263   }
03264   case 32:
03265     assert((X86::VR256RegClass.hasSubClassEq(RC) ||
03266             X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass");
03267     // If the stack is realigned we can use aligned loads and stores.
03268     if (isStackAligned)
03269       return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
03270     else
03271       return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
03272   case 64:
03273     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
03274     if (isStackAligned)
03275       return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
03276     else
03277       return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
03278   }
03279 }
03280 
03281 static unsigned getStoreRegOpcode(unsigned SrcReg,
03282                                   const TargetRegisterClass *RC,
03283                                   bool isStackAligned,
03284                                   const X86Subtarget &STI) {
03285   return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
03286 }
03287 
03288 
03289 static unsigned getLoadRegOpcode(unsigned DestReg,
03290                                  const TargetRegisterClass *RC,
03291                                  bool isStackAligned,
03292                                  const X86Subtarget &STI) {
03293   return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
03294 }
03295 
03296 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
03297                                        MachineBasicBlock::iterator MI,
03298                                        unsigned SrcReg, bool isKill, int FrameIdx,
03299                                        const TargetRegisterClass *RC,
03300                                        const TargetRegisterInfo *TRI) const {
03301   const MachineFunction &MF = *MBB.getParent();
03302   assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
03303          "Stack slot too small for store");
03304   unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
03305   bool isAligned = (MF.getTarget()
03306                         .getSubtargetImpl()
03307                         ->getFrameLowering()
03308                         ->getStackAlignment() >= Alignment) ||
03309                    RI.canRealignStack(MF);
03310   unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
03311   DebugLoc DL = MBB.findDebugLoc(MI);
03312   addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
03313     .addReg(SrcReg, getKillRegState(isKill));
03314 }
03315 
03316 void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
03317                                   bool isKill,
03318                                   SmallVectorImpl<MachineOperand> &Addr,
03319                                   const TargetRegisterClass *RC,
03320                                   MachineInstr::mmo_iterator MMOBegin,
03321                                   MachineInstr::mmo_iterator MMOEnd,
03322                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
03323   unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
03324   bool isAligned = MMOBegin != MMOEnd &&
03325                    (*MMOBegin)->getAlignment() >= Alignment;
03326   unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
03327   DebugLoc DL;
03328   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
03329   for (unsigned i = 0, e = Addr.size(); i != e; ++i)
03330     MIB.addOperand(Addr[i]);
03331   MIB.addReg(SrcReg, getKillRegState(isKill));
03332   (*MIB).setMemRefs(MMOBegin, MMOEnd);
03333   NewMIs.push_back(MIB);
03334 }
03335 
03336 
03337 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
03338                                         MachineBasicBlock::iterator MI,
03339                                         unsigned DestReg, int FrameIdx,
03340                                         const TargetRegisterClass *RC,
03341                                         const TargetRegisterInfo *TRI) const {
03342   const MachineFunction &MF = *MBB.getParent();
03343   unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
03344   bool isAligned = (MF.getTarget()
03345                         .getSubtargetImpl()
03346                         ->getFrameLowering()
03347                         ->getStackAlignment() >= Alignment) ||
03348                    RI.canRealignStack(MF);
03349   unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
03350   DebugLoc DL = MBB.findDebugLoc(MI);
03351   addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
03352 }
03353 
03354 void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
03355                                  SmallVectorImpl<MachineOperand> &Addr,
03356                                  const TargetRegisterClass *RC,
03357                                  MachineInstr::mmo_iterator MMOBegin,
03358                                  MachineInstr::mmo_iterator MMOEnd,
03359                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
03360   unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
03361   bool isAligned = MMOBegin != MMOEnd &&
03362                    (*MMOBegin)->getAlignment() >= Alignment;
03363   unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
03364   DebugLoc DL;
03365   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
03366   for (unsigned i = 0, e = Addr.size(); i != e; ++i)
03367     MIB.addOperand(Addr[i]);
03368   (*MIB).setMemRefs(MMOBegin, MMOEnd);
03369   NewMIs.push_back(MIB);
03370 }
03371 
03372 bool X86InstrInfo::
03373 analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
03374                int &CmpMask, int &CmpValue) const {
03375   switch (MI->getOpcode()) {
03376   default: break;
03377   case X86::CMP64ri32:
03378   case X86::CMP64ri8:
03379   case X86::CMP32ri:
03380   case X86::CMP32ri8:
03381   case X86::CMP16ri:
03382   case X86::CMP16ri8:
03383   case X86::CMP8ri:
03384     SrcReg = MI->getOperand(0).getReg();
03385     SrcReg2 = 0;
03386     CmpMask = ~0;
03387     CmpValue = MI->getOperand(1).getImm();
03388     return true;
03389   // A SUB can be used to perform a comparison.
03390   case X86::SUB64rm:
03391   case X86::SUB32rm:
03392   case X86::SUB16rm:
03393   case X86::SUB8rm:
03394     SrcReg = MI->getOperand(1).getReg();
03395     SrcReg2 = 0;
03396     CmpMask = ~0;
03397     CmpValue = 0;
03398     return true;
03399   case X86::SUB64rr:
03400   case X86::SUB32rr:
03401   case X86::SUB16rr:
03402   case X86::SUB8rr:
03403     SrcReg = MI->getOperand(1).getReg();
03404     SrcReg2 = MI->getOperand(2).getReg();
03405     CmpMask = ~0;
03406     CmpValue = 0;
03407     return true;
03408   case X86::SUB64ri32:
03409   case X86::SUB64ri8:
03410   case X86::SUB32ri:
03411   case X86::SUB32ri8:
03412   case X86::SUB16ri:
03413   case X86::SUB16ri8:
03414   case X86::SUB8ri:
03415     SrcReg = MI->getOperand(1).getReg();
03416     SrcReg2 = 0;
03417     CmpMask = ~0;
03418     CmpValue = MI->getOperand(2).getImm();
03419     return true;
03420   case X86::CMP64rr:
03421   case X86::CMP32rr:
03422   case X86::CMP16rr:
03423   case X86::CMP8rr:
03424     SrcReg = MI->getOperand(0).getReg();
03425     SrcReg2 = MI->getOperand(1).getReg();
03426     CmpMask = ~0;
03427     CmpValue = 0;
03428     return true;
03429   case X86::TEST8rr:
03430   case X86::TEST16rr:
03431   case X86::TEST32rr:
03432   case X86::TEST64rr:
03433     SrcReg = MI->getOperand(0).getReg();
03434     if (MI->getOperand(1).getReg() != SrcReg) return false;
03435     // Compare against zero.
03436     SrcReg2 = 0;
03437     CmpMask = ~0;
03438     CmpValue = 0;
03439     return true;
03440   }
03441   return false;
03442 }
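// For example (register names are only illustrative), analyzeCompare reports
// CMP32ri %EAX, 42 as SrcReg = EAX, SrcReg2 = 0, CmpValue = 42, and
// TEST32rr %EAX, %EAX as a compare of EAX against zero.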
03443 
03444 /// isRedundantFlagInstr - check whether the first instruction, whose only
03445 /// purpose is to update flags, can be made redundant.
03446 /// CMPrr can be made redundant by SUBrr if the operands are the same.
03447 /// This function can be extended later on.
03448 /// SrcReg, SrcReg2: register operands for FlagI.
03449 /// ImmValue: immediate for FlagI if it takes an immediate.
03450 inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg,
03451                                         unsigned SrcReg2, int ImmValue,
03452                                         MachineInstr *OI) {
03453   if (((FlagI->getOpcode() == X86::CMP64rr &&
03454         OI->getOpcode() == X86::SUB64rr) ||
03455        (FlagI->getOpcode() == X86::CMP32rr &&
03456         OI->getOpcode() == X86::SUB32rr)||
03457        (FlagI->getOpcode() == X86::CMP16rr &&
03458         OI->getOpcode() == X86::SUB16rr)||
03459        (FlagI->getOpcode() == X86::CMP8rr &&
03460         OI->getOpcode() == X86::SUB8rr)) &&
03461       ((OI->getOperand(1).getReg() == SrcReg &&
03462         OI->getOperand(2).getReg() == SrcReg2) ||
03463        (OI->getOperand(1).getReg() == SrcReg2 &&
03464         OI->getOperand(2).getReg() == SrcReg)))
03465     return true;
03466 
03467   if (((FlagI->getOpcode() == X86::CMP64ri32 &&
03468         OI->getOpcode() == X86::SUB64ri32) ||
03469        (FlagI->getOpcode() == X86::CMP64ri8 &&
03470         OI->getOpcode() == X86::SUB64ri8) ||
03471        (FlagI->getOpcode() == X86::CMP32ri &&
03472         OI->getOpcode() == X86::SUB32ri) ||
03473        (FlagI->getOpcode() == X86::CMP32ri8 &&
03474         OI->getOpcode() == X86::SUB32ri8) ||
03475        (FlagI->getOpcode() == X86::CMP16ri &&
03476         OI->getOpcode() == X86::SUB16ri) ||
03477        (FlagI->getOpcode() == X86::CMP16ri8 &&
03478         OI->getOpcode() == X86::SUB16ri8) ||
03479        (FlagI->getOpcode() == X86::CMP8ri &&
03480         OI->getOpcode() == X86::SUB8ri)) &&
03481       OI->getOperand(1).getReg() == SrcReg &&
03482       OI->getOperand(2).getImm() == ImmValue)
03483     return true;
03484   return false;
03485 }
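// For example, CMP32rr %EAX, %EBX following SUB32rr %EAX, %EBX (or the SUB
// with its operands swapped) makes the CMP redundant, since the SUB already
// set EFLAGS for the same operand pair. When the operands are swapped, the
// caller must also swap the condition codes of the EFLAGS users.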
03486 
03487 /// isDefConvertible - check whether the definition can be converted
03488 /// to remove a comparison against zero.
03489 inline static bool isDefConvertible(MachineInstr *MI) {
03490   switch (MI->getOpcode()) {
03491   default: return false;
03492 
03493   // The shift instructions only modify ZF if their shift count is non-zero.
03494   // N.B.: The processor truncates the shift count depending on the encoding.
03495   case X86::SAR8ri:    case X86::SAR16ri:  case X86::SAR32ri:case X86::SAR64ri:
03496   case X86::SHR8ri:    case X86::SHR16ri:  case X86::SHR32ri:case X86::SHR64ri:
03497      return getTruncatedShiftCount(MI, 2) != 0;
03498 
03499   // Some left shift instructions can be turned into LEA instructions but only
03500   // if their flags aren't used. Avoid transforming such instructions.
03501   case X86::SHL8ri:    case X86::SHL16ri:  case X86::SHL32ri:case X86::SHL64ri:{
03502     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
03503     if (isTruncatedShiftCountForLEA(ShAmt)) return false;
03504     return ShAmt != 0;
03505   }
03506 
03507   case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
03508   case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
03509      return getTruncatedShiftCount(MI, 3) != 0;
03510 
03511   case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
03512   case X86::SUB32ri8:  case X86::SUB16ri:  case X86::SUB16ri8:
03513   case X86::SUB8ri:    case X86::SUB64rr:  case X86::SUB32rr:
03514   case X86::SUB16rr:   case X86::SUB8rr:   case X86::SUB64rm:
03515   case X86::SUB32rm:   case X86::SUB16rm:  case X86::SUB8rm:
03516   case X86::DEC64r:    case X86::DEC32r:   case X86::DEC16r: case X86::DEC8r:
03517   case X86::DEC64_32r: case X86::DEC64_16r:
03518   case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
03519   case X86::ADD32ri8:  case X86::ADD16ri:  case X86::ADD16ri8:
03520   case X86::ADD8ri:    case X86::ADD64rr:  case X86::ADD32rr:
03521   case X86::ADD16rr:   case X86::ADD8rr:   case X86::ADD64rm:
03522   case X86::ADD32rm:   case X86::ADD16rm:  case X86::ADD8rm:
03523   case X86::INC64r:    case X86::INC32r:   case X86::INC16r: case X86::INC8r:
03524   case X86::INC64_32r: case X86::INC64_16r:
03525   case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
03526   case X86::AND32ri8:  case X86::AND16ri:  case X86::AND16ri8:
03527   case X86::AND8ri:    case X86::AND64rr:  case X86::AND32rr:
03528   case X86::AND16rr:   case X86::AND8rr:   case X86::AND64rm:
03529   case X86::AND32rm:   case X86::AND16rm:  case X86::AND8rm:
03530   case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
03531   case X86::XOR32ri8:  case X86::XOR16ri:  case X86::XOR16ri8:
03532   case X86::XOR8ri:    case X86::XOR64rr:  case X86::XOR32rr:
03533   case X86::XOR16rr:   case X86::XOR8rr:   case X86::XOR64rm:
03534   case X86::XOR32rm:   case X86::XOR16rm:  case X86::XOR8rm:
03535   case X86::OR64ri32:  case X86::OR64ri8:  case X86::OR32ri:
03536   case X86::OR32ri8:   case X86::OR16ri:   case X86::OR16ri8:
03537   case X86::OR8ri:     case X86::OR64rr:   case X86::OR32rr:
03538   case X86::OR16rr:    case X86::OR8rr:    case X86::OR64rm:
03539   case X86::OR32rm:    case X86::OR16rm:   case X86::OR8rm:
03540   case X86::NEG8r:     case X86::NEG16r:   case X86::NEG32r: case X86::NEG64r:
03541   case X86::SAR8r1:    case X86::SAR16r1:  case X86::SAR32r1:case X86::SAR64r1:
03542   case X86::SHR8r1:    case X86::SHR16r1:  case X86::SHR32r1:case X86::SHR64r1:
03543   case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1:case X86::SHL64r1:
03544   case X86::ADC32ri:   case X86::ADC32ri8:
03545   case X86::ADC32rr:   case X86::ADC64ri32:
03546   case X86::ADC64ri8:  case X86::ADC64rr:
03547   case X86::SBB32ri:   case X86::SBB32ri8:
03548   case X86::SBB32rr:   case X86::SBB64ri32:
03549   case X86::SBB64ri8:  case X86::SBB64rr:
03550   case X86::ANDN32rr:  case X86::ANDN32rm:
03551   case X86::ANDN64rr:  case X86::ANDN64rm:
03552   case X86::BEXTR32rr: case X86::BEXTR64rr:
03553   case X86::BEXTR32rm: case X86::BEXTR64rm:
03554   case X86::BLSI32rr:  case X86::BLSI32rm:
03555   case X86::BLSI64rr:  case X86::BLSI64rm:
03556   case X86::BLSMSK32rr:case X86::BLSMSK32rm:
03557   case X86::BLSMSK64rr:case X86::BLSMSK64rm:
03558   case X86::BLSR32rr:  case X86::BLSR32rm:
03559   case X86::BLSR64rr:  case X86::BLSR64rm:
03560   case X86::BZHI32rr:  case X86::BZHI32rm:
03561   case X86::BZHI64rr:  case X86::BZHI64rm:
03562   case X86::LZCNT16rr: case X86::LZCNT16rm:
03563   case X86::LZCNT32rr: case X86::LZCNT32rm:
03564   case X86::LZCNT64rr: case X86::LZCNT64rm:
03565   case X86::POPCNT16rr:case X86::POPCNT16rm:
03566   case X86::POPCNT32rr:case X86::POPCNT32rm:
03567   case X86::POPCNT64rr:case X86::POPCNT64rm:
03568   case X86::TZCNT16rr: case X86::TZCNT16rm:
03569   case X86::TZCNT32rr: case X86::TZCNT32rm:
03570   case X86::TZCNT64rr: case X86::TZCNT64rm:
03571     return true;
03572   }
03573 }
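// For example, ADD32rr sets ZF/SF according to its result, so a subsequent
// TEST or CMP against zero on that result register can be removed, provided
// the compare only feeds conditions that optimizeCompareInstr considers safe
// (users of CF/OF are rejected there).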
03574 
03575 /// isUseDefConvertible - check whether the use can be converted
03576 /// to remove a comparison against zero.
03577 static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
03578   switch (MI->getOpcode()) {
03579   default: return X86::COND_INVALID;
03580   case X86::LZCNT16rr: case X86::LZCNT16rm:
03581   case X86::LZCNT32rr: case X86::LZCNT32rm:
03582   case X86::LZCNT64rr: case X86::LZCNT64rm:
03583     return X86::COND_B;
03584   case X86::POPCNT16rr:case X86::POPCNT16rm:
03585   case X86::POPCNT32rr:case X86::POPCNT32rm:
03586   case X86::POPCNT64rr:case X86::POPCNT64rm:
03587     return X86::COND_E;
03588   case X86::TZCNT16rr: case X86::TZCNT16rm:
03589   case X86::TZCNT32rr: case X86::TZCNT32rm:
03590   case X86::TZCNT64rr: case X86::TZCNT64rm:
03591     return X86::COND_B;
03592   }
03593 }
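// For illustration: POPCNT sets ZF exactly when its source operand is zero,
// so a later compare of that source against zero can reuse ZF (COND_E).
// LZCNT and TZCNT set CF when their source is zero, so the same compare maps
// to COND_B instead.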
03594 
03595 /// optimizeCompareInstr - Check if there exists an earlier instruction that
03596 /// operates on the same source operands and sets flags in the same way as
03597 /// Compare; remove Compare if possible.
03598 bool X86InstrInfo::
03599 optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
03600                      int CmpMask, int CmpValue,
03601                      const MachineRegisterInfo *MRI) const {
03602   // Check whether we can replace SUB with CMP.
03603   unsigned NewOpcode = 0;
03604   switch (CmpInstr->getOpcode()) {
03605   default: break;
03606   case X86::SUB64ri32:
03607   case X86::SUB64ri8:
03608   case X86::SUB32ri:
03609   case X86::SUB32ri8:
03610   case X86::SUB16ri:
03611   case X86::SUB16ri8:
03612   case X86::SUB8ri:
03613   case X86::SUB64rm:
03614   case X86::SUB32rm:
03615   case X86::SUB16rm:
03616   case X86::SUB8rm:
03617   case X86::SUB64rr:
03618   case X86::SUB32rr:
03619   case X86::SUB16rr:
03620   case X86::SUB8rr: {
03621     if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
03622       return false;
03623     // There is no use of the destination register; we can replace SUB with CMP.
03624     switch (CmpInstr->getOpcode()) {
03625     default: llvm_unreachable("Unreachable!");
03626     case X86::SUB64rm:   NewOpcode = X86::CMP64rm;   break;
03627     case X86::SUB32rm:   NewOpcode = X86::CMP32rm;   break;
03628     case X86::SUB16rm:   NewOpcode = X86::CMP16rm;   break;
03629     case X86::SUB8rm:    NewOpcode = X86::CMP8rm;    break;
03630     case X86::SUB64rr:   NewOpcode = X86::CMP64rr;   break;
03631     case X86::SUB32rr:   NewOpcode = X86::CMP32rr;   break;
03632     case X86::SUB16rr:   NewOpcode = X86::CMP16rr;   break;
03633     case X86::SUB8rr:    NewOpcode = X86::CMP8rr;    break;
03634     case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
03635     case X86::SUB64ri8:  NewOpcode = X86::CMP64ri8;  break;
03636     case X86::SUB32ri:   NewOpcode = X86::CMP32ri;   break;
03637     case X86::SUB32ri8:  NewOpcode = X86::CMP32ri8;  break;
03638     case X86::SUB16ri:   NewOpcode = X86::CMP16ri;   break;
03639     case X86::SUB16ri8:  NewOpcode = X86::CMP16ri8;  break;
03640     case X86::SUB8ri:    NewOpcode = X86::CMP8ri;    break;
03641     }
03642     CmpInstr->setDesc(get(NewOpcode));
03643     CmpInstr->RemoveOperand(0);
03644     // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
03645     if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
03646         NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
03647       return false;
03648   }
03649   }
03650 
03651   // Get the unique definition of SrcReg.
03652   MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
03653   if (!MI) return false;
03654 
03655   // CmpInstr is the first instruction of the BB.
03656   MachineBasicBlock::iterator I = CmpInstr, Def = MI;
03657 
03658   // If we are comparing against zero, check whether we can use MI to update
03659   // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
03660   bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0);
03661   if (IsCmpZero && MI->getParent() != CmpInstr->getParent())
03662     return false;
03663 
03664   // If we have a use of the source register between the def and our compare
03665   // instruction we can eliminate the compare iff the use sets EFLAGS in the
03666   // right way.
03667   bool ShouldUpdateCC = false;
03668   X86::CondCode NewCC = X86::COND_INVALID;
03669   if (IsCmpZero && !isDefConvertible(MI)) {
03670     // Scan forward from the def until we hit the use we're looking for or the
03671     // compare instruction.
03672     for (MachineBasicBlock::iterator J = MI;; ++J) {
03673       // Do we have a convertible instruction?
03674       NewCC = isUseDefConvertible(J);
03675       if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
03676           J->getOperand(1).getReg() == SrcReg) {
03677         assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
03678         ShouldUpdateCC = true; // Update CC later on.
03679         // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
03680         // with the new def.
03681         MI = Def = J;
03682         break;
03683       }
03684 
03685       if (J == I)
03686         return false;
03687     }
03688   }
03689 
03690   // We are searching for an earlier instruction that can make CmpInstr
03691   // redundant; that instruction will be saved in Sub.
03692   MachineInstr *Sub = nullptr;
03693   const TargetRegisterInfo *TRI = &getRegisterInfo();
03694 
03695   // We iterate backwards, starting from the instruction before CmpInstr, and
03696   // stop when reaching the definition of a source register or the start of the BB.
03697   // RI points to the instruction before CmpInstr.
03698   // If the definition is in this basic block, RE points to the definition;
03699   // otherwise, RE is the rend of the basic block.
03700   MachineBasicBlock::reverse_iterator
03701       RI = MachineBasicBlock::reverse_iterator(I),
03702       RE = CmpInstr->getParent() == MI->getParent() ?
03703            MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ :
03704            CmpInstr->getParent()->rend();
03705   MachineInstr *Movr0Inst = nullptr;
03706   for (; RI != RE; ++RI) {
03707     MachineInstr *Instr = &*RI;
03708     // Check whether CmpInstr can be made redundant by the current instruction.
03709     if (!IsCmpZero &&
03710         isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) {
03711       Sub = Instr;
03712       break;
03713     }
03714 
03715     if (Instr->modifiesRegister(X86::EFLAGS, TRI) ||
03716         Instr->readsRegister(X86::EFLAGS, TRI)) {
03717       // This instruction modifies or uses EFLAGS.
03718 
03719       // MOV32r0 etc. are implemented with xor which clobbers condition code.
03720       // They are safe to move up, if the definition to EFLAGS is dead and
03721       // earlier instructions do not read or write EFLAGS.
03722       if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 &&
03723           Instr->registerDefIsDead(X86::EFLAGS, TRI)) {
03724         Movr0Inst = Instr;
03725         continue;
03726       }
03727 
03728       // We can't remove CmpInstr.
03729       return false;
03730     }
03731   }
03732 
03733   // Return false if no candidates exist.
03734   if (!IsCmpZero && !Sub)
03735     return false;
03736 
03737   bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
03738                     Sub->getOperand(2).getReg() == SrcReg);
03739 
03740   // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
03741   // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
03742   // If we are done with the basic block, we need to check whether EFLAGS is
03743   // live-out.
03744   bool IsSafe = false;
03745   SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate;
03746   MachineBasicBlock::iterator E = CmpInstr->getParent()->end();
03747   for (++I; I != E; ++I) {
03748     const MachineInstr &Instr = *I;
03749     bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
03750     bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
03751     // If this instruction both uses and updates EFLAGS, we need to check its usage.
03752     if (!UseEFLAGS && ModifyEFLAGS) {
03753       // It is safe to remove CmpInstr if EFLAGS is updated again.
03754       IsSafe = true;
03755       break;
03756     }
03757     if (!UseEFLAGS && !ModifyEFLAGS)
03758       continue;
03759 
03760     // EFLAGS is used by this instruction.
03761     X86::CondCode OldCC = X86::COND_INVALID;
03762     bool OpcIsSET = false;
03763     if (IsCmpZero || IsSwapped) {
03764       // We decode the condition code from opcode.
03765       if (Instr.isBranch())
03766         OldCC = getCondFromBranchOpc(Instr.getOpcode());
03767       else {
03768         OldCC = getCondFromSETOpc(Instr.getOpcode());
03769         if (OldCC != X86::COND_INVALID)
03770           OpcIsSET = true;
03771         else
03772           OldCC = X86::getCondFromCMovOpc(Instr.getOpcode());
03773       }
03774       if (OldCC == X86::COND_INVALID) return false;
03775     }
03776     if (IsCmpZero) {
03777       switch (OldCC) {
03778       default: break;
03779       case X86::COND_A: case X86::COND_AE:
03780       case X86::COND_B: case X86::COND_BE:
03781       case X86::COND_G: case X86::COND_GE:
03782       case X86::COND_L: case X86::COND_LE:
03783       case X86::COND_O: case X86::COND_NO:
03784         // CF and OF are used, we can't perform this optimization.
03785         return false;
03786       }
03787 
03788       // If we're updating the condition code check if we have to reverse the
03789       // condition.
03790       if (ShouldUpdateCC)
03791         switch (OldCC) {
03792         default:
03793           return false;
03794         case X86::COND_E:
03795           break;
03796         case X86::COND_NE:
03797           NewCC = GetOppositeBranchCondition(NewCC);
03798           break;
03799         }
03800     } else if (IsSwapped) {
03801       // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
03802       // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
03803       // We swap the condition code and synthesize the new opcode.
03804       NewCC = getSwappedCondition(OldCC);
03805       if (NewCC == X86::COND_INVALID) return false;
03806     }
03807 
03808     if ((ShouldUpdateCC || IsSwapped) && NewCC != OldCC) {
03809       // Synthesize the new opcode.
03810       bool HasMemoryOperand = Instr.hasOneMemOperand();
03811       unsigned NewOpc;
03812       if (Instr.isBranch())
03813         NewOpc = GetCondBranchFromCond(NewCC);
03814       else if(OpcIsSET)
03815         NewOpc = getSETFromCond(NewCC, HasMemoryOperand);
03816       else {
03817         unsigned DstReg = Instr.getOperand(0).getReg();
03818         NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(),
03819                                  HasMemoryOperand);
03820       }
03821 
03822       // Push the MachineInstr to OpsToUpdate.
03823       // If it is safe to remove CmpInstr, the condition code of these
03824       // instructions will be modified.
03825       OpsToUpdate.push_back(std::make_pair(&*I, NewOpc));
03826     }
03827     if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
03828       // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
03829       IsSafe = true;
03830       break;
03831     }
03832   }
03833 
03834   // If EFLAGS is not killed nor re-defined, we should check whether it is
03835   // live-out. If it is live-out, do not optimize.
03836   if ((IsCmpZero || IsSwapped) && !IsSafe) {
03837     MachineBasicBlock *MBB = CmpInstr->getParent();
03838     for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
03839              SE = MBB->succ_end(); SI != SE; ++SI)
03840       if ((*SI)->isLiveIn(X86::EFLAGS))
03841         return false;
03842   }
03843 
03844   // The instruction to be updated is either Sub or MI.
03845   Sub = IsCmpZero ? MI : Sub;
03846   // Move Movr0Inst to the appropriate place before Sub.
03847   if (Movr0Inst) {
03848     // Look backwards until we find a def that doesn't use the current EFLAGS.
03849     Def = Sub;
03850     MachineBasicBlock::reverse_iterator
03851       InsertI = MachineBasicBlock::reverse_iterator(++Def),
03852                 InsertE = Sub->getParent()->rend();
03853     for (; InsertI != InsertE; ++InsertI) {
03854       MachineInstr *Instr = &*InsertI;
03855       if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
03856           Instr->modifiesRegister(X86::EFLAGS, TRI)) {
03857         Sub->getParent()->remove(Movr0Inst);
03858         Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
03859                                    Movr0Inst);
03860         break;
03861       }
03862     }
03863     if (InsertI == InsertE)
03864       return false;
03865   }
03866 
03867   // Make sure Sub instruction defines EFLAGS and mark the def live.
03868   unsigned i = 0, e = Sub->getNumOperands();
03869   for (; i != e; ++i) {
03870     MachineOperand &MO = Sub->getOperand(i);
03871     if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
03872       MO.setIsDead(false);
03873       break;
03874     }
03875   }
03876   assert(i != e && "Unable to locate a def EFLAGS operand");
03877 
03878   CmpInstr->eraseFromParent();
03879 
03880   // Modify the condition code of instructions in OpsToUpdate.
03881   for (unsigned i = 0, e = OpsToUpdate.size(); i < e; i++)
03882     OpsToUpdate[i].first->setDesc(get(OpsToUpdate[i].second));
03883   return true;
03884 }
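// A sketch of the overall transformation (registers and labels are only
// illustrative):
//   subl %esi, %edi        # sets EFLAGS
//   cmpl %esi, %edi        # same operands, redundant
//   jl   .LBB0_2
// becomes
//   subl %esi, %edi
//   jl   .LBB0_2
// If the CMP operands are swapped relative to the SUB, the conditional users
// are rewritten with the swapped condition instead (e.g. jl becomes jg).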
03885 
03886 /// optimizeLoadInstr - Try to remove the load by folding it to a register
03887 /// operand at the use. We fold the load instructions if the load defines a
03888 /// virtual register, the virtual register is used once in the same BB, and the
03889 /// instructions in-between do not load, store, or have side effects.
03890 MachineInstr* X86InstrInfo::
03891 optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI,
03892                   unsigned &FoldAsLoadDefReg,
03893                   MachineInstr *&DefMI) const {
03894   if (FoldAsLoadDefReg == 0)
03895     return nullptr;
03896   // To be conservative, if there exists another load, clear the load candidate.
03897   if (MI->mayLoad()) {
03898     FoldAsLoadDefReg = 0;
03899     return nullptr;
03900   }
03901 
03902   // Check whether we can move DefMI here.
03903   DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
03904   assert(DefMI);
03905   bool SawStore = false;
03906   if (!DefMI->isSafeToMove(this, nullptr, SawStore))
03907     return nullptr;
03908 
03909   // We try to commute MI if possible.
03910   unsigned IdxEnd = (MI->isCommutable()) ? 2 : 1;
03911   for (unsigned Idx = 0; Idx < IdxEnd; Idx++) {
03912     // Collect information about virtual register operands of MI.
03913     unsigned SrcOperandId = 0;
03914     bool FoundSrcOperand = false;
03915     for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
03916       MachineOperand &MO = MI->getOperand(i);
03917       if (!MO.isReg())
03918         continue;
03919       unsigned Reg = MO.getReg();
03920       if (Reg != FoldAsLoadDefReg)
03921         continue;
03922       // Do not fold if we have a subreg use or a def or multiple uses.
03923       if (MO.getSubReg() || MO.isDef() || FoundSrcOperand)
03924         return nullptr;
03925 
03926       SrcOperandId = i;
03927       FoundSrcOperand = true;
03928     }
03929     if (!FoundSrcOperand) return nullptr;
03930 
03931     // Check whether we can fold the def into SrcOperandId.
03932     SmallVector<unsigned, 8> Ops;
03933     Ops.push_back(SrcOperandId);
03934     MachineInstr *FoldMI = foldMemoryOperand(MI, Ops, DefMI);
03935     if (FoldMI) {
03936       FoldAsLoadDefReg = 0;
03937       return FoldMI;
03938     }
03939 
03940     if (Idx == 1) {
03941       // MI was changed but it didn't help; commute it back!
03942       commuteInstruction(MI, false);
03943       return nullptr;
03944     }
03945 
03946     // Check whether we can commute MI and enable folding.
03947     if (MI->isCommutable()) {
03948       MachineInstr *NewMI = commuteInstruction(MI, false);
03949       // Unable to commute.
03950       if (!NewMI) return nullptr;
03951       if (NewMI != MI) {
03952         // New instruction. It doesn't need to be kept.
03953         NewMI->eraseFromParent();
03954         return nullptr;
03955       }
03956     }
03957   }
03958   return nullptr;
03959 }
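// A typical case (virtual register names are placeholders): a MOV32rm that
// defines a virtual register with a single use in the same block, where no
// intervening instruction may load, store, or otherwise have side effects,
// is folded into that use, e.g. turning an ADD32rr into an ADD32rm, so the
// caller can then delete the now-dead load.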
03960 
03961 /// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr
03962 /// instruction with two undef reads of the register being defined.  This is
03963 /// used for mapping:
03964 ///   %xmm4 = V_SET0
03965 /// to:
03966 ///   %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef>
03967 ///
03968 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
03969                              const MCInstrDesc &Desc) {
03970   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
03971   unsigned Reg = MIB->getOperand(0).getReg();
03972   MIB->setDesc(Desc);
03973 
03974   // MachineInstr::addOperand() will insert explicit operands before any
03975   // implicit operands.
03976   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
03977   // But we don't trust that.
03978   assert(MIB->getOperand(1).getReg() == Reg &&
03979          MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
03980   return true;
03981 }
03982 
03983 // LoadStackGuard has so far only been implemented for 64-bit MachO. A
03984 // different code sequence is needed for other targets.
03985 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
03986                                  const TargetInstrInfo &TII) {
03987   MachineBasicBlock &MBB = *MIB->getParent();
03988   DebugLoc DL = MIB->getDebugLoc();
03989   unsigned Reg = MIB->getOperand(0).getReg();
03990   const GlobalValue *GV =
03991       cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
03992   unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
03993   MachineMemOperand *MMO = MBB.getParent()->
03994       getMachineMemOperand(MachinePointerInfo::getGOT(), Flag, 8, 8);
03995   MachineBasicBlock::iterator I = MIB;
03996 
03997   BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
03998       .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
03999       .addMemOperand(MMO);
04000   MIB->setDebugLoc(DL);
04001   MIB->setDesc(TII.get(X86::MOV64rm));
04002   MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
04003 }
04004 
04005 bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
04006   bool HasAVX = Subtarget.hasAVX();
04007   MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
04008   switch (MI->getOpcode()) {
04009   case X86::MOV32r0:
04010     return Expand2AddrUndef(MIB, get(X86::XOR32rr));
04011   case X86::SETB_C8r:
04012     return Expand2AddrUndef(MIB, get(X86::SBB8rr));
04013   case X86::SETB_C16r:
04014     return Expand2AddrUndef(MIB, get(X86::SBB16rr));
04015   case X86::SETB_C32r:
04016     return Expand2AddrUndef(MIB, get(X86::SBB32rr));
04017   case X86::SETB_C64r:
04018     return Expand2AddrUndef(MIB, get(X86::SBB64rr));
04019   case X86::V_SET0:
04020   case X86::FsFLD0SS:
04021   case X86::FsFLD0SD:
04022     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
04023   case X86::AVX_SET0:
04024     assert(HasAVX && "AVX not supported");
04025     return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
04026   case X86::AVX512_512_SET0:
04027     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
04028   case X86::V_SETALLONES:
04029     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
04030   case X86::AVX2_SETALLONES:
04031     return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
04032   case X86::TEST8ri_NOREX:
04033     MI->setDesc(get(X86::TEST8ri));
04034     return true;
04035   case X86::KSET0B: 
04036   case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
04037   case X86::KSET1B:
04038   case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
04039   case TargetOpcode::LOAD_STACK_GUARD:
04040     expandLoadStackGuard(MIB, *this);
04041     return true;
04042   }
04043   return false;
04044 }
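// For example, this expands
//   %eax = MOV32r0
// into
//   %eax = XOR32rr %eax<undef>, %eax<undef>
// and V_SET0 / FsFLD0SS / FsFLD0SD into an (V)XORPS of the destination with
// itself, following the mapping handled by Expand2AddrUndef above.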
04045 
04046 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
04047                                      const SmallVectorImpl<MachineOperand> &MOs,
04048                                      MachineInstr *MI,
04049                                      const TargetInstrInfo &TII) {
04050   // Create the base instruction with the memory operand as the first part.
04051   // Omit the implicit operands, something BuildMI can't do.
04052   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
04053                                               MI->getDebugLoc(), true);
04054   MachineInstrBuilder MIB(MF, NewMI);
04055   unsigned NumAddrOps = MOs.size();
04056   for (unsigned i = 0; i != NumAddrOps; ++i)
04057     MIB.addOperand(MOs[i]);
04058   if (NumAddrOps < 4)  // FrameIndex only
04059     addOffset(MIB, 0);
04060 
04061   // Loop over the rest of the ri operands, converting them over.
04062   unsigned NumOps = MI->getDesc().getNumOperands()-2;
04063   for (unsigned i = 0; i != NumOps; ++i) {
04064     MachineOperand &MO = MI->getOperand(i+2);
04065     MIB.addOperand(MO);
04066   }
04067   for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
04068     MachineOperand &MO = MI->getOperand(i);
04069     MIB.addOperand(MO);
04070   }
04071   return MIB;
04072 }
04073 
04074 static MachineInstr *FuseInst(MachineFunction &MF,
04075                               unsigned Opcode, unsigned OpNo,
04076                               const SmallVectorImpl<MachineOperand> &MOs,
04077                               MachineInstr *MI, const TargetInstrInfo &TII) {
04078   // Omit the implicit operands, something BuildMI can't do.
04079   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
04080                                               MI->getDebugLoc(), true);
04081   MachineInstrBuilder MIB(MF, NewMI);
04082 
04083   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
04084     MachineOperand &MO = MI->getOperand(i);
04085     if (i == OpNo) {
04086       assert(MO.isReg() && "Expected to fold into reg operand!");
04087       unsigned NumAddrOps = MOs.size();
04088       for (unsigned i = 0; i != NumAddrOps; ++i)
04089         MIB.addOperand(MOs[i]);
04090       if (NumAddrOps < 4)  // FrameIndex only
04091         addOffset(MIB, 0);
04092     } else {
04093       MIB.addOperand(MO);
04094     }
04095   }
04096   return MIB;
04097 }
04098 
04099 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
04100                                 const SmallVectorImpl<MachineOperand> &MOs,
04101                                 MachineInstr *MI) {
04102   MachineFunction &MF = *MI->getParent()->getParent();
04103   MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
04104 
04105   unsigned NumAddrOps = MOs.size();
04106   for (unsigned i = 0; i != NumAddrOps; ++i)
04107     MIB.addOperand(MOs[i]);
04108   if (NumAddrOps < 4)  // FrameIndex only
04109     addOffset(MIB, 0);
04110   return MIB.addImm(0);
04111 }
04112 
04113 MachineInstr*
04114 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
04115                                     MachineInstr *MI, unsigned i,
04116                                     const SmallVectorImpl<MachineOperand> &MOs,
04117                                     unsigned Size, unsigned Align) const {
04118   const DenseMap<unsigned,
04119                  std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
04120   bool isCallRegIndirect = Subtarget.callRegIndirect();
04121   bool isTwoAddrFold = false;
04122 
04123   // Atom favors the register form of calls, so we do not fold loads into calls
04124   // when the subtarget is Atom.
04125   if (isCallRegIndirect &&
04126     (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r)) {
04127     return nullptr;
04128   }
04129 
04130   unsigned NumOps = MI->getDesc().getNumOperands();
04131   bool isTwoAddr = NumOps > 1 &&
04132     MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
04133 
04134   // FIXME: AsmPrinter doesn't know how to handle
04135   // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
04136   if (MI->getOpcode() == X86::ADD32ri &&
04137       MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
04138     return nullptr;
04139 
04140   MachineInstr *NewMI = nullptr;
04141   // Folding a memory location into the two-address part of a two-address
04142   // instruction is different from folding it elsewhere.  It requires
04143   // replacing the *two* registers with the memory location.
04144   if (isTwoAddr && NumOps >= 2 && i < 2 &&
04145       MI->getOperand(0).isReg() &&
04146       MI->getOperand(1).isReg() &&
04147       MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
04148     OpcodeTablePtr = &RegOp2MemOpTable2Addr;
04149     isTwoAddrFold = true;
04150   } else if (i == 0) { // If operand 0
04151     if (MI->getOpcode() == X86::MOV32r0) {
04152       NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
04153       if (NewMI)
04154         return NewMI;
04155     }
04156 
04157     OpcodeTablePtr = &RegOp2MemOpTable0;
04158   } else if (i == 1) {
04159     OpcodeTablePtr = &RegOp2MemOpTable1;
04160   } else if (i == 2) {
04161     OpcodeTablePtr = &RegOp2MemOpTable2;
04162   } else if (i == 3) {
04163     OpcodeTablePtr = &RegOp2MemOpTable3;
04164   }
04165 
04166   // If table selected...
04167   if (OpcodeTablePtr) {
04168     // Find the Opcode to fuse
04169     DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
04170       OpcodeTablePtr->find(MI->getOpcode());
04171     if (I != OpcodeTablePtr->end()) {
04172       unsigned Opcode = I->second.first;
04173       unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
04174       if (Align < MinAlign)
04175         return nullptr;
04176       bool NarrowToMOV32rm = false;
04177       if (Size) {
04178         unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize();
04179         if (Size < RCSize) {
04180           // Check if it's safe to fold the load. If the size of the object is
04181           // narrower than the load width, then it's not.
04182           if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
04183             return nullptr;
04184           // If this is a 64-bit load, but the spill slot is only 32 bits, we can
04185           // do a 32-bit load which is implicitly zero-extended. This is likely due
04186           // to live interval analysis remat'ing a load from a stack slot.
04187           if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
04188             return nullptr;
04189           Opcode = X86::MOV32rm;
04190           NarrowToMOV32rm = true;
04191         }
04192       }
04193 
04194       if (isTwoAddrFold)
04195         NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
04196       else
04197         NewMI = FuseInst(MF, Opcode, i, MOs, MI, *this);
04198 
04199       if (NarrowToMOV32rm) {
04200         // This is the special case where we use a MOV32rm to load a 32-bit
04201         // value and zero-extend the top bits. Change the destination register
04202         // to a 32-bit one.
04203         unsigned DstReg = NewMI->getOperand(0).getReg();
04204         if (TargetRegisterInfo::isPhysicalRegister(DstReg))
04205           NewMI->getOperand(0).setReg(RI.getSubReg(DstReg,
04206                                                    X86::sub_32bit));
04207         else
04208           NewMI->getOperand(0).setSubReg(X86::sub_32bit);
04209       }
04210       return NewMI;
04211     }
04212   }
04213 
04214   // No fusion
04215   if (PrintFailedFusing && !MI->isCopy())
04216     dbgs() << "We failed to fuse operand " << i << " in " << *MI;
04217   return nullptr;
04218 }
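// For example, folding operand 0 of MOV32r0 against a stack slot does not go
// through the folding tables at all; MakeM0Inst turns it directly into a
// MOV32mi store of the constant 0 to that slot.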
04219 
04220 /// hasPartialRegUpdate - Return true for all instructions that only update
04221 /// the first 32 or 64-bits of the destination register and leave the rest
04222 /// unmodified. This can be used to avoid folding loads if the instructions
04223 /// only update part of the destination register, and the non-updated part is
04224 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
04225 /// instructions breaks the partial register dependency and it can improve
04226 /// performance. e.g.:
04227 ///
04228 ///   movss (%rdi), %xmm0
04229 ///   cvtss2sd %xmm0, %xmm0
04230 ///
04231 /// Instead of
04232 ///   cvtss2sd (%rdi), %xmm0
04233 ///
04234 /// FIXME: This should be turned into a TSFlags.
04235 ///
04236 static bool hasPartialRegUpdate(unsigned Opcode) {
04237   switch (Opcode) {
04238   case X86::CVTSI2SSrr:
04239   case X86::CVTSI2SS64rr:
04240   case X86::CVTSI2SDrr:
04241   case X86::CVTSI2SD64rr:
04242   case X86::CVTSD2SSrr:
04243   case X86::Int_CVTSD2SSrr:
04244   case X86::CVTSS2SDrr:
04245   case X86::Int_CVTSS2SDrr:
04246   case X86::RCPSSr:
04247   case X86::RCPSSr_Int:
04248   case X86::ROUNDSDr:
04249   case X86::ROUNDSDr_Int:
04250   case X86::ROUNDSSr:
04251   case X86::ROUNDSSr_Int:
04252   case X86::RSQRTSSr:
04253   case X86::RSQRTSSr_Int:
04254   case X86::SQRTSSr:
04255   case X86::SQRTSSr_Int:
04256     return true;
04257   }
04258 
04259   return false;
04260 }
04261 
04262 /// getPartialRegUpdateClearance - Inform the ExeDepsFix pass how many idle
04263 /// instructions we would like before a partial register update.
04264 unsigned X86InstrInfo::
04265 getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
04266                              const TargetRegisterInfo *TRI) const {
04267   if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
04268     return 0;
04269 
04270   // If MI is marked as reading Reg, the partial register update is wanted.
04271   const MachineOperand &MO = MI->getOperand(0);
04272   unsigned Reg = MO.getReg();
04273   if (TargetRegisterInfo::isVirtualRegister(Reg)) {
04274     if (MO.readsReg() || MI->readsVirtualRegister(Reg))
04275       return 0;
04276   } else {
04277     if (MI->readsRegister(Reg, TRI))
04278       return 0;
04279   }
04280 
04281   // If any of the preceding 16 instructions are reading Reg, insert a
04282   // dependency breaking instruction.  The magic number is based on a few
04283   // Nehalem experiments.
04284   return 16;
04285 }
04286 
04287 // Return true for any instruction that copies the high bits of the first source
04288 // operand into the unused high bits of the destination operand.
04289 static bool hasUndefRegUpdate(unsigned Opcode) {
04290   switch (Opcode) {
04291   case X86::VCVTSI2SSrr:
04292   case X86::Int_VCVTSI2SSrr:
04293   case X86::VCVTSI2SS64rr:
04294   case X86::Int_VCVTSI2SS64rr:
04295   case X86::VCVTSI2SDrr:
04296   case X86::Int_VCVTSI2SDrr:
04297   case X86::VCVTSI2SD64rr:
04298   case X86::Int_VCVTSI2SD64rr:
04299   case X86::VCVTSD2SSrr:
04300   case X86::Int_VCVTSD2SSrr:
04301   case X86::VCVTSS2SDrr:
04302   case X86::Int_VCVTSS2SDrr:
04303   case X86::VRCPSSr:
04304   case X86::VROUNDSDr:
04305   case X86::VROUNDSDr_Int:
04306   case X86::VROUNDSSr:
04307   case X86::VROUNDSSr_Int:
04308   case X86::VRSQRTSSr:
04309   case X86::VSQRTSSr:
04310 
04311   // AVX-512
04312   case X86::VCVTSD2SSZrr:
04313   case X86::VCVTSS2SDZrr:
04314     return true;
04315   }
04316 
04317   return false;
04318 }
04319 
04320 /// Inform the ExeDepsFix pass how many idle instructions we would like before
04321 /// certain undef register reads.
04322 ///
04323 /// This catches the VCVTSI2SD family of instructions:
04324 ///
04325 /// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
04326 ///
04327 /// We should be careful *not* to catch VXOR idioms which are presumably
04328 /// handled specially in the pipeline:
04329 ///
04330 /// vxorps %xmm1<undef>, %xmm1<undef>, %xmm1
04331 ///
04332 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
04333 /// high bits that are passed-through are not live.
04334 unsigned X86InstrInfo::
04335 getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
04336                      const TargetRegisterInfo *TRI) const {
04337   if (!hasUndefRegUpdate(MI->getOpcode()))
04338     return 0;
04339 
04340   // Set the OpNum parameter to the first source operand.
04341   OpNum = 1;
04342 
04343   const MachineOperand &MO = MI->getOperand(OpNum);
04344   if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
04345     // Use the same magic number as getPartialRegUpdateClearance.
04346     return 16;
04347   }
04348   return 0;
04349 }
04350 
04351 void X86InstrInfo::
04352 breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
04353                           const TargetRegisterInfo *TRI) const {
04354   unsigned Reg = MI->getOperand(OpNum).getReg();
04355   // If MI kills this register, the false dependence is already broken.
04356   if (MI->killsRegister(Reg, TRI))
04357     return;
04358   if (X86::VR128RegClass.contains(Reg)) {
04359     // These instructions are all floating point domain, so xorps is the best
04360     // choice.
04361     bool HasAVX = Subtarget.hasAVX();
04362     unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr;
04363     BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
04364       .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
04365   } else if (X86::VR256RegClass.contains(Reg)) {
04366     // Use vxorps to clear the full ymm register.
04367     // It wants to read and write the xmm sub-register.
04368     unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
04369     BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
04370       .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
04371       .addReg(Reg, RegState::ImplicitDefine);
04372   } else
04373     return;
04374   MI->addRegisterKilled(Reg, TRI, true);
04375 }
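// For example (register choice is illustrative), before
//   cvtsi2ss %eax, %xmm0
// this may insert
//   xorps %xmm0, %xmm0
// so the convert no longer depends on the previous contents of %xmm0. For a
// ymm register, the vxorps is issued on the xmm sub-register instead.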
04376 
04377 MachineInstr*
04378 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
04379                                     const SmallVectorImpl<unsigned> &Ops,
04380                                     int FrameIndex) const {
04381   // Check switch flag
04382   if (NoFusing) return nullptr;
04383 
04384   // Unless optimizing for size, don't fold instructions that only partially
04385   // update their destination, to avoid partial register update stalls.
04386   if (!MF.getFunction()->getAttributes().
04387         hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
04388       hasPartialRegUpdate(MI->getOpcode()))
04389     return nullptr;
04390 
04391   const MachineFrameInfo *MFI = MF.getFrameInfo();
04392   unsigned Size = MFI->getObjectSize(FrameIndex);
04393   unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
04394   // If the function stack isn't realigned we don't want to fold instructions
04395   // that need increased alignment.
04396   if (!RI.needsStackRealignment(MF))
04397     Alignment = std::min(Alignment, MF.getTarget()
04398                                         .getSubtargetImpl()
04399                                         ->getFrameLowering()
04400                                         ->getStackAlignment());
04401   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
04402     unsigned NewOpc = 0;
04403     unsigned RCSize = 0;
04404     switch (MI->getOpcode()) {
04405     default: return nullptr;
04406     case X86::TEST8rr:  NewOpc = X86::CMP8ri; RCSize = 1; break;
04407     case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
04408     case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
04409     case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
04410     }
04411     // Check if it's safe to fold the load. If the size of the object is
04412     // narrower than the load width, then it's not.
04413     if (Size < RCSize)
04414       return nullptr;
04415     // Change to CMPXXri r, 0 first.
04416     MI->setDesc(get(NewOpc));
04417     MI->getOperand(1).ChangeToImmediate(0);
04418   } else if (Ops.size() != 1)
04419     return nullptr;
04420 
04421   SmallVector<MachineOperand,4> MOs;
04422   MOs.push_back(MachineOperand::CreateFI(FrameIndex));
04423   return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, Size, Alignment);
04424 }
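// For example, TEST32rr %reg, %reg where %reg is being spilled to slot <fi#N>
// is first rewritten to CMP32ri8 %reg, 0 and then folded through the
// operand-0 table, typically yielding CMP32mi8 <fi#N>, 0, provided the slot
// is at least as wide as the register.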
04425 
04426 static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
04427                                   const MachineFunction &MF) {
04428   unsigned Opc = LoadMI.getOpcode();
04429   unsigned RegSize =
04430       MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize();
04431 
04432   if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm) && RegSize > 4)
04433     // These instructions load only 32 bits; we can't fold them if the
04434     // destination register is wider than 32 bits (4 bytes).
04435     return true;
04436 
04437   if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm) && RegSize > 8)
04438     // These instructions load only 64 bits; we can't fold them if the
04439     // destination register is wider than 64 bits (8 bytes).
04440     return true;
04441 
04442   return false;
04443 }
04444 
04445 MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
04446                                                   MachineInstr *MI,
04447                                            const SmallVectorImpl<unsigned> &Ops,
04448                                                   MachineInstr *LoadMI) const {
04449   // If loading from a FrameIndex, fold directly from the FrameIndex.
04450   unsigned NumOps = LoadMI->getDesc().getNumOperands();
04451   int FrameIndex;
04452   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
04453     if (isPartialRegisterLoad(*LoadMI, MF))
04454       return nullptr;
04455     return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
04456   }
04457 
04458   // Check switch flag
04459   if (NoFusing) return nullptr;
04460 
04461   // Unless optimizing for size, don't fold instructions that only partially
04462   // update their destination, to avoid partial register update stalls.
04463   if (!MF.getFunction()->getAttributes().
04464         hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
04465       hasPartialRegUpdate(MI->getOpcode()))
04466     return nullptr;
04467 
04468   // Determine the alignment of the load.
04469   unsigned Alignment = 0;
04470   if (LoadMI->hasOneMemOperand())
04471     Alignment = (*LoadMI->memoperands_begin())->getAlignment();
04472   else
04473     switch (LoadMI->getOpcode()) {
04474     case X86::AVX2_SETALLONES:
04475     case X86::AVX_SET0:
04476       Alignment = 32;
04477       break;
04478     case X86::V_SET0:
04479     case X86::V_SETALLONES:
04480       Alignment = 16;
04481       break;
04482     case X86::FsFLD0SD:
04483       Alignment = 8;
04484       break;
04485     case X86::FsFLD0SS:
04486       Alignment = 4;
04487       break;
04488     default:
04489       return nullptr;
04490     }
04491   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
04492     unsigned NewOpc = 0;
04493     switch (MI->getOpcode()) {
04494     default: return nullptr;
04495     case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
04496     case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
04497     case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
04498     case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
04499     }
04500     // Change to CMPXXri r, 0 first.
04501     MI->setDesc(get(NewOpc));
04502     MI->getOperand(1).ChangeToImmediate(0);
04503   } else if (Ops.size() != 1)
04504     return nullptr;
04505 
04506   // Make sure the subregisters match.
04507   // Otherwise we risk changing the size of the load.
04508   if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
04509     return nullptr;
04510 
04511   SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
04512   switch (LoadMI->getOpcode()) {
04513   case X86::V_SET0:
04514   case X86::V_SETALLONES:
04515   case X86::AVX2_SETALLONES:
04516   case X86::AVX_SET0:
04517   case X86::FsFLD0SD:
04518   case X86::FsFLD0SS: {
04519     // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
04520     // Create a constant-pool entry and operands to load from it.
04521 
04522     // Medium and large mode can't fold loads this way.
04523     if (MF.getTarget().getCodeModel() != CodeModel::Small &&
04524         MF.getTarget().getCodeModel() != CodeModel::Kernel)
04525       return nullptr;
04526 
04527     // x86-32 PIC requires a PIC base register for constant pools.
04528     unsigned PICBase = 0;
04529     if (MF.getTarget().getRelocationModel() == Reloc::PIC_) {
04530       if (Subtarget.is64Bit())
04531         PICBase = X86::RIP;
04532       else
04533         // FIXME: PICBase = getGlobalBaseReg(&MF);
04534         // This doesn't work for several reasons.
04535         // 1. GlobalBaseReg may have been spilled.
04536         // 2. It may not be live at MI.
04537         return nullptr;
04538     }
04539 
04540     // Create a constant-pool entry.
04541     MachineConstantPool &MCP = *MF.getConstantPool();
04542     Type *Ty;
04543     unsigned Opc = LoadMI->getOpcode();
04544     if (Opc == X86::FsFLD0SS)
04545       Ty = Type::getFloatTy(MF.getFunction()->getContext());
04546     else if (Opc == X86::FsFLD0SD)
04547       Ty = Type::getDoubleTy(MF.getFunction()->getContext());
04548     else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0)
04549       Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
04550     else
04551       Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
04552 
04553     bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES);
04554     const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
04555                                     Constant::getNullValue(Ty);
04556     unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
04557 
04558     // Create operands to load from the constant pool entry.
04559     MOs.push_back(MachineOperand::CreateReg(PICBase, false));
04560     MOs.push_back(MachineOperand::CreateImm(1));
04561     MOs.push_back(MachineOperand::CreateReg(0, false));
04562     MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
04563     MOs.push_back(MachineOperand::CreateReg(0, false));
04564     break;
04565   }
04566   default: {
04567     if (isPartialRegisterLoad(*LoadMI, MF))
04568       return nullptr;
04569 
04570     // Folding a normal load. Just copy the load's address operands.
04571     for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
04572       MOs.push_back(LoadMI->getOperand(i));
04573     break;
04574   }
04575   }
04576   return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, 0, Alignment);
04577 }
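// For example, when LoadMI is V_SETALLONES, the all-ones vector is placed in
// the constant pool and the using instruction is refolded to read that pool
// entry directly; a PANDrr use roughly becomes a PANDrm whose memory operand
// addresses the new constant-pool index.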
04578 
04579 
04580 bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
04581                                   const SmallVectorImpl<unsigned> &Ops) const {
04582   // Check switch flag
04583   if (NoFusing) return false;
04584 
04585   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
04586     switch (MI->getOpcode()) {
04587     default: return false;
04588     case X86::TEST8rr:
04589     case X86::TEST16rr:
04590     case X86::TEST32rr:
04591     case X86::TEST64rr:
04592       return true;
04593     case X86::ADD32ri:
04594       // FIXME: AsmPrinter doesn't know how to handle
04595       // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
04596       if (MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
04597         return false;
04598       break;
04599     }
04600   }
04601 
04602   if (Ops.size() != 1)
04603     return false;
04604 
04605   unsigned OpNum = Ops[0];
04606   unsigned Opc = MI->getOpcode();
04607   unsigned NumOps = MI->getDesc().getNumOperands();
04608   bool isTwoAddr = NumOps > 1 &&
04609     MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
04610 
04611   // Folding a memory location into the two-address part of a two-address
04612     // instruction is different from folding it in other places.  It requires
04613   // replacing the *two* registers with the memory location.
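          // RegOp2MemOpTable<N> maps a register-form opcode to the memory-form
          // opcode used when operand N is the one being folded.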
04614   const DenseMap<unsigned,
04615                  std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
04616   if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
04617     OpcodeTablePtr = &RegOp2MemOpTable2Addr;
04618   } else if (OpNum == 0) { // If operand 0 is the one being folded.
04619     if (Opc == X86::MOV32r0)
04620       return true;
04621 
04622     OpcodeTablePtr = &RegOp2MemOpTable0;
04623   } else if (OpNum == 1) {
04624     OpcodeTablePtr = &RegOp2MemOpTable1;
04625   } else if (OpNum == 2) {
04626     OpcodeTablePtr = &RegOp2MemOpTable2;
04627   } else if (OpNum == 3) {
04628     OpcodeTablePtr = &RegOp2MemOpTable3;
04629   }
04630 
04631   if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
04632     return true;
04633   return TargetInstrInfo::canFoldMemoryOperand(MI, Ops);
04634 }
04635 
04636 bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
04637                                 unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
04638                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
04639   DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
04640     MemOp2RegOpTable.find(MI->getOpcode());
04641   if (I == MemOp2RegOpTable.end())
04642     return false;
04643   unsigned Opc = I->second.first;
04644   unsigned Index = I->second.second & TB_INDEX_MASK;
04645   bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
04646   bool FoldedStore = I->second.second & TB_FOLDED_STORE;
04647   if (UnfoldLoad && !FoldedLoad)
04648     return false;
04649   UnfoldLoad &= FoldedLoad;
04650   if (UnfoldStore && !FoldedStore)
04651     return false;
04652   UnfoldStore &= FoldedStore;
04653 
04654   const MCInstrDesc &MCID = get(Opc);
04655   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
04656   if (!MI->hasOneMemOperand() &&
04657       RC == &X86::VR128RegClass &&
04658       !Subtarget.isUnalignedMemAccessFast())
04659     // Without memoperands, loadRegFromAddr and storeRegToAddr will
04660     // conservatively assume the address is unaligned. That's bad for
04661     // performance.
04662     return false;
04663   SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
04664   SmallVector<MachineOperand,2> BeforeOps;
04665   SmallVector<MachineOperand,2> AfterOps;
04666   SmallVector<MachineOperand,4> ImpOps;
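          // Partition MI's operands: the X86::AddrNumOperands address operands
          // start at Index, implicit register operands are collected separately,
          // and the remaining explicit operands are split into those before and
          // after the folded operand.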
04667   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
04668     MachineOperand &Op = MI->getOperand(i);
04669     if (i >= Index && i < Index + X86::AddrNumOperands)
04670       AddrOps.push_back(Op);
04671     else if (Op.isReg() && Op.isImplicit())
04672       ImpOps.push_back(Op);
04673     else if (i < Index)
04674       BeforeOps.push_back(Op);
04675     else if (i > Index)
04676       AfterOps.push_back(Op);
04677   }
04678 
04679   // Emit the load instruction.
04680   if (UnfoldLoad) {
04681     std::pair<MachineInstr::mmo_iterator,
04682               MachineInstr::mmo_iterator> MMOs =
04683       MF.extractLoadMemRefs(MI->memoperands_begin(),
04684                             MI->memoperands_end());
04685     loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
04686     if (UnfoldStore) {
04687       // Address operands cannot be marked isKill.
04688       for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
04689         MachineOperand &MO = NewMIs[0]->getOperand(i);
04690         if (MO.isReg())
04691           MO.setIsKill(false);
04692       }
04693     }
04694   }
04695 
04696   // Emit the data processing instruction.
04697   MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true);
04698   MachineInstrBuilder MIB(MF, DataMI);
04699 
04700   if (FoldedStore)
04701     MIB.addReg(Reg, RegState::Define);
04702   for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
04703     MIB.addOperand(BeforeOps[i]);
04704   if (FoldedLoad)
04705     MIB.addReg(Reg);
04706   for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
04707     MIB.addOperand(AfterOps[i]);
04708   for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
04709     MachineOperand &MO = ImpOps[i];
04710     MIB.addReg(MO.getReg(),
04711                getDefRegState(MO.isDef()) |
04712                RegState::Implicit |
04713                getKillRegState(MO.isKill()) |
04714                getDeadRegState(MO.isDead()) |
04715                getUndefRegState(MO.isUndef()));
04716   }
04717   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
04718   switch (DataMI->getOpcode()) {
04719   default: break;
04720   case X86::CMP64ri32:
04721   case X86::CMP64ri8:
04722   case X86::CMP32ri:
04723   case X86::CMP32ri8:
04724   case X86::CMP16ri:
04725   case X86::CMP16ri8:
04726   case X86::CMP8ri: {
04727     MachineOperand &MO0 = DataMI->getOperand(0);
04728     MachineOperand &MO1 = DataMI->getOperand(1);
04729     if (MO1.getImm() == 0) {
04730       unsigned NewOpc;
04731       switch (DataMI->getOpcode()) {
04732       default: llvm_unreachable("Unreachable!");
04733       case X86::CMP64ri8:
04734       case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
04735       case X86::CMP32ri8:
04736       case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
04737       case X86::CMP16ri8:
04738       case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
04739       case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
04740       }
04741       DataMI->setDesc(get(NewOpc));
04742       MO1.ChangeToRegister(MO0.getReg(), false);
04743     }
04744   }
04745   }
04746   NewMIs.push_back(DataMI);
04747 
04748   // Emit the store instruction.
04749   if (UnfoldStore) {
04750     const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
04751     std::pair<MachineInstr::mmo_iterator,
04752               MachineInstr::mmo_iterator> MMOs =
04753       MF.extractStoreMemRefs(MI->memoperands_begin(),
04754                              MI->memoperands_end());
04755     storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
04756   }
04757 
04758   return true;
04759 }
04760 
04761 bool
04762 X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
04763                                   SmallVectorImpl<SDNode*> &NewNodes) const {
04764   if (!N->isMachineOpcode())
04765     return false;
04766 
04767   DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
04768     MemOp2RegOpTable.find(N->getMachineOpcode());
04769   if (I == MemOp2RegOpTable.end())
04770     return false;
04771   unsigned Opc = I->second.first;
04772   unsigned Index = I->second.second & TB_INDEX_MASK;
04773   bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
04774   bool FoldedStore = I->second.second & TB_FOLDED_STORE;
04775   const MCInstrDesc &MCID = get(Opc);
04776   MachineFunction &MF = DAG.getMachineFunction();
04777   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
04778   unsigned NumDefs = MCID.NumDefs;
04779   std::vector<SDValue> AddrOps;
04780   std::vector<SDValue> BeforeOps;
04781   std::vector<SDValue> AfterOps;
04782   SDLoc dl(N);
04783   unsigned NumOps = N->getNumOperands();
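          // Index counts MachineInstr operands, which include the defs; an
          // SDNode's operand list does not, so the address operands start at
          // Index-NumDefs here.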
04784   for (unsigned i = 0; i != NumOps-1; ++i) {
04785     SDValue Op = N->getOperand(i);
04786     if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
04787       AddrOps.push_back(Op);
04788     else if (i < Index-NumDefs)
04789       BeforeOps.push_back(Op);
04790     else if (i > Index-NumDefs)
04791       AfterOps.push_back(Op);
04792   }
04793   SDValue Chain = N->getOperand(NumOps-1);
04794   AddrOps.push_back(Chain);
04795 
04796   // Emit the load instruction.
04797   SDNode *Load = nullptr;
04798   if (FoldedLoad) {
04799     EVT VT = *RC->vt_begin();
04800     std::pair<MachineInstr::mmo_iterator,
04801               MachineInstr::mmo_iterator> MMOs =
04802       MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
04803                             cast<MachineSDNode>(N)->memoperands_end());
04804     if (!(*MMOs.first) &&
04805         RC == &X86::VR128RegClass &&
04806         !Subtarget.isUnalignedMemAccessFast())
04807       // Do not introduce a slow unaligned load.
04808       return false;
04809     unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
04810     bool isAligned = (*MMOs.first) &&
04811                      (*MMOs.first)->getAlignment() >= Alignment;
04812     Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
04813                               VT, MVT::Other, AddrOps);
04814     NewNodes.push_back(Load);
04815 
04816     // Preserve memory reference information.
04817     cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
04818   }
04819 
04820   // Emit the data processing instruction.
04821   std::vector<EVT> VTs;
04822   const TargetRegisterClass *DstRC = nullptr;
04823   if (MCID.getNumDefs() > 0) {
04824     DstRC = getRegClass(MCID, 0, &RI, MF);
04825     VTs.push_back(*DstRC->vt_begin());
04826   }
04827   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
04828     EVT VT = N->getValueType(i);
04829     if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
04830       VTs.push_back(VT);
04831   }
04832   if (Load)
04833     BeforeOps.push_back(SDValue(Load, 0));
04834   std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
04835   SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
04836   NewNodes.push_back(NewNode);
04837 
04838   // Emit the store instruction.
04839   if (FoldedStore) {
04840     AddrOps.pop_back();
04841     AddrOps.push_back(SDValue(NewNode, 0));
04842     AddrOps.push_back(Chain);
04843     std::pair<MachineInstr::mmo_iterator,
04844               MachineInstr::mmo_iterator> MMOs =
04845       MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
04846                              cast<MachineSDNode>(N)->memoperands_end());
04847     if (!(*MMOs.first) &&
04848         RC == &X86::VR128RegClass &&
04849         !Subtarget.isUnalignedMemAccessFast())
04850       // Do not introduce a slow unaligned store.
04851       return false;
04852     unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
04853     bool isAligned = (*MMOs.first) &&
04854                      (*MMOs.first)->getAlignment() >= Alignment;
04855     SDNode *Store =
04856         DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
04857                            dl, MVT::Other, AddrOps);
04858     NewNodes.push_back(Store);
04859 
04860     // Preserve memory reference information.
04861     cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second);
04862   }
04863 
04864   return true;
04865 }
04866 
04867 unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
04868                                       bool UnfoldLoad, bool UnfoldStore,
04869                                       unsigned *LoadRegIndex) const {
04870   DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
04871     MemOp2RegOpTable.find(Opc);
04872   if (I == MemOp2RegOpTable.end())
04873     return 0;
04874   bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
04875   bool FoldedStore = I->second.second & TB_FOLDED_STORE;
04876   if (UnfoldLoad && !FoldedLoad)
04877     return 0;
04878   if (UnfoldStore && !FoldedStore)
04879     return 0;
04880   if (LoadRegIndex)
04881     *LoadRegIndex = I->second.second & TB_INDEX_MASK;
04882   return I->second.first;
04883 }
04884 
04885 bool
04886 X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
04887                                      int64_t &Offset1, int64_t &Offset2) const {
04888   if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
04889     return false;
04890   unsigned Opc1 = Load1->getMachineOpcode();
04891   unsigned Opc2 = Load2->getMachineOpcode();
04892   switch (Opc1) {
04893   default: return false;
04894   case X86::MOV8rm:
04895   case X86::MOV16rm:
04896   case X86::MOV32rm:
04897   case X86::MOV64rm:
04898   case X86::LD_Fp32m:
04899   case X86::LD_Fp64m:
04900   case X86::LD_Fp80m:
04901   case X86::MOVSSrm:
04902   case X86::MOVSDrm:
04903   case X86::MMX_MOVD64rm:
04904   case X86::MMX_MOVQ64rm:
04905   case X86::FsMOVAPSrm:
04906   case X86::FsMOVAPDrm:
04907   case X86::MOVAPSrm:
04908   case X86::MOVUPSrm:
04909   case X86::MOVAPDrm:
04910   case X86::MOVDQArm:
04911   case X86::MOVDQUrm:
04912   // AVX load instructions
04913   case X86::VMOVSSrm:
04914   case X86::VMOVSDrm:
04915   case X86::FsVMOVAPSrm:
04916   case X86::FsVMOVAPDrm:
04917   case X86::VMOVAPSrm:
04918   case X86::VMOVUPSrm:
04919   case X86::VMOVAPDrm:
04920   case X86::VMOVDQArm:
04921   case X86::VMOVDQUrm:
04922   case X86::VMOVAPSYrm:
04923   case X86::VMOVUPSYrm:
04924   case X86::VMOVAPDYrm:
04925   case X86::VMOVDQAYrm:
04926   case X86::VMOVDQUYrm:
04927     break;
04928   }
04929   switch (Opc2) {
04930   default: return false;
04931   case X86::MOV8rm:
04932   case X86::MOV16rm:
04933   case X86::MOV32rm:
04934   case X86::MOV64rm:
04935   case X86::LD_Fp32m:
04936   case X86::LD_Fp64m:
04937   case X86::LD_Fp80m:
04938   case X86::MOVSSrm:
04939   case X86::MOVSDrm:
04940   case X86::MMX_MOVD64rm:
04941   case X86::MMX_MOVQ64rm:
04942   case X86::FsMOVAPSrm:
04943   case X86::FsMOVAPDrm:
04944   case X86::MOVAPSrm:
04945   case X86::MOVUPSrm:
04946   case X86::MOVAPDrm:
04947   case X86::MOVDQArm:
04948   case X86::MOVDQUrm:
04949   // AVX load instructions
04950   case X86::VMOVSSrm:
04951   case X86::VMOVSDrm:
04952   case X86::FsVMOVAPSrm:
04953   case X86::FsVMOVAPDrm:
04954   case X86::VMOVAPSrm:
04955   case X86::VMOVUPSrm:
04956   case X86::VMOVAPDrm:
04957   case X86::VMOVDQArm:
04958   case X86::VMOVDQUrm:
04959   case X86::VMOVAPSYrm:
04960   case X86::VMOVUPSYrm:
04961   case X86::VMOVAPDYrm:
04962   case X86::VMOVDQAYrm:
04963   case X86::VMOVDQUYrm:
04964     break;
04965   }
04966 
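          // Both loads use the standard X86 memory-operand layout: operand 0 is
          // the base, 1 the scale, 2 the index, 3 the displacement, 4 the
          // segment, and the final operand (5) is the chain.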
04967   // Check if chain operands and base addresses match.
04968   if (Load1->getOperand(0) != Load2->getOperand(0) ||
04969       Load1->getOperand(5) != Load2->getOperand(5))
04970     return false;
04971   // Segment operands should match as well.
04972   if (Load1->getOperand(4) != Load2->getOperand(4))
04973     return false;
04974   // Scale should be 1, Index should be Reg0.
04975   if (Load1->getOperand(1) == Load2->getOperand(1) &&
04976       Load1->getOperand(2) == Load2->getOperand(2)) {
04977     if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
04978       return false;
04979 
04980     // Now let's examine the displacements.
04981     if (isa<ConstantSDNode>(Load1->getOperand(3)) &&
04982         isa<ConstantSDNode>(Load2->getOperand(3))) {
04983       Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue();
04984       Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue();
04985       return true;
04986     }
04987   }
04988   return false;
04989 }
04990 
04991 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
04992                                            int64_t Offset1, int64_t Offset2,
04993                                            unsigned NumLoads) const {
04994   assert(Offset2 > Offset1);
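          // Heuristic: don't cluster the loads if their displacements are far
          // apart (roughly more than 512 bytes).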
04995   if ((Offset2 - Offset1) / 8 > 64)
04996     return false;
04997 
04998   unsigned Opc1 = Load1->getMachineOpcode();
04999   unsigned Opc2 = Load2->getMachineOpcode();
05000   if (Opc1 != Opc2)
05001     return false;  // FIXME: overly conservative?
05002 
05003   switch (Opc1) {
05004   default: break;
05005   case X86::LD_Fp32m:
05006   case X86::LD_Fp64m:
05007   case X86::LD_Fp80m:
05008   case X86::MMX_MOVD64rm:
05009   case X86::MMX_MOVQ64rm:
05010     return false;
05011   }
05012 
05013   EVT VT = Load1->getValueType(0);
05014   switch (VT.getSimpleVT().SimpleTy) {
05015   default:
05016     // XMM registers. In 64-bit mode we can be a bit more aggressive since we
05017     // have 16 of them to play with.
05018     if (Subtarget.is64Bit()) {
05019       if (NumLoads >= 3)
05020         return false;
05021     } else if (NumLoads) {
05022       return false;
05023     }
05024     break;
05025   case MVT::i8:
05026   case MVT::i16:
05027   case MVT::i32:
05028   case MVT::i64:
05029   case MVT::f32:
05030   case MVT::f64:
05031     if (NumLoads)
05032       return false;
05033     break;
05034   }
05035 
05036   return true;
05037 }
05038 
05039 bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
05040                                           MachineInstr *Second) const {
05041   // Check if this processor supports macro-fusion. Since this is a minor
05042   // heuristic, we haven't specifically reserved a feature. hasAVX is a decent
05043   // proxy for SandyBridge+.
05044   if (!Subtarget.hasAVX())
05045     return false;
05046 
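          // Classify the branch by what it can macro-fuse with, as implemented by
          // the two switches below: FuseInc-class branches (equality and signed
          // comparisons) fuse with TEST/AND, CMP/ADD/SUB, and INC/DEC;
          // FuseCmp-class branches (unsigned comparisons) fuse with everything
          // except INC/DEC; FuseTest-class branches (sign/parity/overflow) fuse
          // only with TEST/AND.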
05047   enum {
05048     FuseTest,
05049     FuseCmp,
05050     FuseInc
05051   } FuseKind;
05052 
05053   switch(Second->getOpcode()) {
05054   default:
05055     return false;
05056   case X86::JE_4:
05057   case X86::JNE_4:
05058   case X86::JL_4:
05059   case X86::JLE_4:
05060   case X86::JG_4:
05061   case X86::JGE_4:
05062     FuseKind = FuseInc;
05063     break;
05064   case X86::JB_4:
05065   case X86::JBE_4:
05066   case X86::JA_4:
05067   case X86::JAE_4:
05068     FuseKind = FuseCmp;
05069     break;
05070   case X86::JS_4:
05071   case X86::JNS_4:
05072   case X86::JP_4:
05073   case X86::JNP_4:
05074   case X86::JO_4:
05075   case X86::JNO_4:
05076     FuseKind = FuseTest;
05077     break;
05078   }
05079   switch (First->getOpcode()) {
05080   default:
05081     return false;
05082   case X86::TEST8rr:
05083   case X86::TEST16rr:
05084   case X86::TEST32rr:
05085   case X86::TEST64rr:
05086   case X86::TEST8ri:
05087   case X86::TEST16ri:
05088   case X86::TEST32ri:
05089   case X86::TEST32i32:
05090   case X86::TEST64i32:
05091   case X86::TEST64ri32:
05092   case X86::TEST8rm:
05093   case X86::TEST16rm:
05094   case X86::TEST32rm:
05095   case X86::TEST64rm:
05096   case X86::TEST8ri_NOREX:
05097   case X86::AND16i16:
05098   case X86::AND16ri:
05099   case X86::AND16ri8:
05100   case X86::AND16rm:
05101   case X86::AND16rr:
05102   case X86::AND32i32:
05103   case X86::AND32ri:
05104   case X86::AND32ri8:
05105   case X86::AND32rm:
05106   case X86::AND32rr:
05107   case X86::AND64i32:
05108   case X86::AND64ri32:
05109   case X86::AND64ri8:
05110   case X86::AND64rm:
05111   case X86::AND64rr:
05112   case X86::AND8i8:
05113   case X86::AND8ri:
05114   case X86::AND8rm:
05115   case X86::AND8rr:
05116     return true;
05117   case X86::CMP16i16:
05118   case X86::CMP16ri:
05119   case X86::CMP16ri8:
05120   case X86::CMP16rm:
05121   case X86::CMP16rr:
05122   case X86::CMP32i32:
05123   case X86::CMP32ri:
05124   case X86::CMP32ri8:
05125   case X86::CMP32rm:
05126   case X86::CMP32rr:
05127   case X86::CMP64i32:
05128   case X86::CMP64ri32:
05129   case X86::CMP64ri8:
05130   case X86::CMP64rm:
05131   case X86::CMP64rr:
05132   case X86::CMP8i8:
05133   case X86::CMP8ri:
05134   case X86::CMP8rm:
05135   case X86::CMP8rr:
05136   case X86::ADD16i16:
05137   case X86::ADD16ri:
05138   case X86::ADD16ri8:
05139   case X86::ADD16ri8_DB:
05140   case X86::ADD16ri_DB:
05141   case X86::ADD16rm:
05142   case X86::ADD16rr:
05143   case X86::ADD16rr_DB:
05144   case X86::ADD32i32:
05145   case X86::ADD32ri:
05146   case X86::ADD32ri8:
05147   case X86::ADD32ri8_DB:
05148   case X86::ADD32ri_DB:
05149   case X86::ADD32rm:
05150   case X86::ADD32rr:
05151   case X86::ADD32rr_DB:
05152   case X86::ADD64i32:
05153   case X86::ADD64ri32:
05154   case X86::ADD64ri32_DB:
05155   case X86::ADD64ri8:
05156   case X86::ADD64ri8_DB:
05157   case X86::ADD64rm:
05158   case X86::ADD64rr:
05159   case X86::ADD64rr_DB:
05160   case X86::ADD8i8:
05161   case X86::ADD8mi:
05162   case X86::ADD8mr:
05163   case X86::ADD8ri:
05164   case X86::ADD8rm:
05165   case X86::ADD8rr:
05166   case X86::SUB16i16:
05167   case X86::SUB16ri:
05168   case X86::SUB16ri8:
05169   case X86::SUB16rm:
05170   case X86::SUB16rr:
05171   case X86::SUB32i32:
05172   case X86::SUB32ri:
05173   case X86::SUB32ri8:
05174   case X86::SUB32rm:
05175   case X86::SUB32rr:
05176   case X86::SUB64i32:
05177   case X86::SUB64ri32:
05178   case X86::SUB64ri8:
05179   case X86::SUB64rm:
05180   case X86::SUB64rr:
05181   case X86::SUB8i8:
05182   case X86::SUB8ri:
05183   case X86::SUB8rm:
05184   case X86::SUB8rr:
05185     return FuseKind == FuseCmp || FuseKind == FuseInc;
05186   case X86::INC16r:
05187   case X86::INC32r:
05188   case X86::INC64_16r:
05189   case X86::INC64_32r:
05190   case X86::INC64r:
05191   case X86::INC8r:
05192   case X86::DEC16r:
05193   case X86::DEC32r:
05194   case X86::DEC64_16r:
05195   case X86::DEC64_32r:
05196   case X86::DEC64r:
05197   case X86::DEC8r:
05198     return FuseKind == FuseInc;
05199   }
05200 }
05201 
05202 bool X86InstrInfo::
05203 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
05204   assert(Cond.size() == 1 && "Invalid X86 branch condition!");
05205   X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
05206   if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
05207     return true;
05208   Cond[0].setImm(GetOppositeBranchCondition(CC));
05209   return false;
05210 }
05211 
05212 bool X86InstrInfo::
05213 isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
05214   // FIXME: Return false for x87 stack register classes for now. We can't
05215   // allow any loads of these registers before FpGet_ST0_80.
05216   return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
05217            RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
05218 }
05219 
05220 /// getGlobalBaseReg - Return a virtual register initialized with the
05221 /// global base register value. Output instructions required to
05222 /// initialize the register in the function entry block, if necessary.
05223 ///
05224 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
05225 ///
05226 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
05227   assert(!Subtarget.is64Bit() &&
05228          "X86-64 PIC uses RIP relative addressing");
05229 
05230   X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
05231   unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
05232   if (GlobalBaseReg != 0)
05233     return GlobalBaseReg;
05234 
05235   // Create the register. The code to initialize it is inserted
05236   // later, by the CGBR pass (below).
05237   MachineRegisterInfo &RegInfo = MF->getRegInfo();
05238   GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
05239   X86FI->setGlobalBaseReg(GlobalBaseReg);
05240   return GlobalBaseReg;
05241 }
05242 
05243 // These are the replaceable SSE instructions. Some of these have Int variants
05244 // that we don't include here. We don't want to replace instructions selected
05245 // by intrinsics.
05246 static const uint16_t ReplaceableInstrs[][3] = {
05247   //PackedSingle     PackedDouble    PackedInt
05248   { X86::MOVAPSmr,   X86::MOVAPDmr,  X86::MOVDQAmr  },
05249   { X86::MOVAPSrm,   X86::MOVAPDrm,  X86::MOVDQArm  },
05250   { X86::MOVAPSrr,   X86::MOVAPDrr,  X86::MOVDQArr  },
05251   { X86::MOVUPSmr,   X86::MOVUPDmr,  X86::MOVDQUmr  },
05252   { X86::MOVUPSrm,   X86::MOVUPDrm,  X86::MOVDQUrm  },
05253   { X86::MOVNTPSmr,  X86::MOVNTPDmr, X86::MOVNTDQmr },
05254   { X86::ANDNPSrm,   X86::ANDNPDrm,  X86::PANDNrm   },
05255   { X86::ANDNPSrr,   X86::ANDNPDrr,  X86::PANDNrr   },
05256   { X86::ANDPSrm,    X86::ANDPDrm,   X86::PANDrm    },
05257   { X86::ANDPSrr,    X86::ANDPDrr,   X86::PANDrr    },
05258   { X86::ORPSrm,     X86::ORPDrm,    X86::PORrm     },
05259   { X86::ORPSrr,     X86::ORPDrr,    X86::PORrr     },
05260   { X86::XORPSrm,    X86::XORPDrm,   X86::PXORrm    },
05261   { X86::XORPSrr,    X86::XORPDrr,   X86::PXORrr    },
05262   // AVX 128-bit support
05263   { X86::VMOVAPSmr,  X86::VMOVAPDmr,  X86::VMOVDQAmr  },
05264   { X86::VMOVAPSrm,  X86::VMOVAPDrm,  X86::VMOVDQArm  },
05265   { X86::VMOVAPSrr,  X86::VMOVAPDrr,  X86::VMOVDQArr  },
05266   { X86::VMOVUPSmr,  X86::VMOVUPDmr,  X86::VMOVDQUmr  },
05267   { X86::VMOVUPSrm,  X86::VMOVUPDrm,  X86::VMOVDQUrm  },
05268   { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
05269   { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNrm   },
05270   { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNrr   },
05271   { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDrm    },
05272   { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDrr    },
05273   { X86::VORPSrm,    X86::VORPDrm,    X86::VPORrm     },
05274   { X86::VORPSrr,    X86::VORPDrr,    X86::VPORrr     },
05275   { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORrm    },
05276   { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORrr    },
05277   // AVX 256-bit support
05278   { X86::VMOVAPSYmr,   X86::VMOVAPDYmr,   X86::VMOVDQAYmr  },
05279   { X86::VMOVAPSYrm,   X86::VMOVAPDYrm,   X86::VMOVDQAYrm  },
05280   { X86::VMOVAPSYrr,   X86::VMOVAPDYrr,   X86::VMOVDQAYrr  },
05281   { X86::VMOVUPSYmr,   X86::VMOVUPDYmr,   X86::VMOVDQUYmr  },
05282   { X86::VMOVUPSYrm,   X86::VMOVUPDYrm,   X86::VMOVDQUYrm  },
05283   { X86::VMOVNTPSYmr,  X86::VMOVNTPDYmr,  X86::VMOVNTDQYmr }
05284 };
05285 
05286 static const uint16_t ReplaceableInstrsAVX2[][3] = {
05287   //PackedSingle       PackedDouble       PackedInt
05288   { X86::VANDNPSYrm,   X86::VANDNPDYrm,   X86::VPANDNYrm   },
05289   { X86::VANDNPSYrr,   X86::VANDNPDYrr,   X86::VPANDNYrr   },
05290   { X86::VANDPSYrm,    X86::VANDPDYrm,    X86::VPANDYrm    },
05291   { X86::VANDPSYrr,    X86::VANDPDYrr,    X86::VPANDYrr    },
05292   { X86::VORPSYrm,     X86::VORPDYrm,     X86::VPORYrm     },
05293   { X86::VORPSYrr,     X86::VORPDYrr,     X86::VPORYrr     },
05294   { X86::VXORPSYrm,    X86::VXORPDYrm,    X86::VPXORYrm    },
05295   { X86::VXORPSYrr,    X86::VXORPDYrr,    X86::VPXORYrr    },
05296   { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
05297   { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
05298   { X86::VINSERTF128rm,  X86::VINSERTF128rm,  X86::VINSERTI128rm },
05299   { X86::VINSERTF128rr,  X86::VINSERTF128rr,  X86::VINSERTI128rr },
05300   { X86::VPERM2F128rm,   X86::VPERM2F128rm,   X86::VPERM2I128rm },
05301   { X86::VPERM2F128rr,   X86::VPERM2F128rr,   X86::VPERM2I128rr },
05302   { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
05303   { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
05304   { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
05305   { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
05306   { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
05307   { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}
05308 };
05309 
05310 // FIXME: Some shuffle and unpack instructions have equivalents in different
05311 // domains, but they require a bit more work than just switching opcodes.
05312 
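      // The domain encoded in TSFlags is 1-based (0 means the instruction has no
      // convertible domain), so the tables above are indexed with domain-1.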
05313 static const uint16_t *lookup(unsigned opcode, unsigned domain) {
05314   for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
05315     if (ReplaceableInstrs[i][domain-1] == opcode)
05316       return ReplaceableInstrs[i];
05317   return nullptr;
05318 }
05319 
05320 static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
05321   for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i)
05322     if (ReplaceableInstrsAVX2[i][domain-1] == opcode)
05323       return ReplaceableInstrsAVX2[i];
05324   return nullptr;
05325 }
05326 
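      // getExecutionDomain returns the instruction's current SSE execution domain
      // (1 = PackedSingle, 2 = PackedDouble, 3 = PackedInt, matching the table
      // columns above) and a bitmask of the domains it can be converted to:
      // 0xe keeps all three available, while 0x6 drops PackedInt because the
      // 256-bit integer forms require AVX2.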
05327 std::pair<uint16_t, uint16_t>
05328 X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
05329   uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
05330   bool hasAVX2 = Subtarget.hasAVX2();
05331   uint16_t validDomains = 0;
05332   if (domain && lookup(MI->getOpcode(), domain))
05333     validDomains = 0xe;
05334   else if (domain && lookupAVX2(MI->getOpcode(), domain))
05335     validDomains = hasAVX2 ? 0xe : 0x6;
05336   return std::make_pair(domain, validDomains);
05337 }
05338 
05339 void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
05340   assert(Domain>0 && Domain<4 && "Invalid execution domain");
05341   uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
05342   assert(dom && "Not an SSE instruction");
05343   const uint16_t *table = lookup(MI->getOpcode(), dom);
05344   if (!table) { // try the other table
05345     assert((Subtarget.hasAVX2() || Domain < 3) &&
05346            "256-bit vector operations only available in AVX2");
05347     table = lookupAVX2(MI->getOpcode(), dom);
05348   }
05349   assert(table && "Cannot change domain");
05350   MI->setDesc(get(table[Domain-1]));
05351 }
05352 
05353 /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
05354 void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
05355   NopInst.setOpcode(X86::NOOP);
05356 }
05357 
05358 void X86InstrInfo::getUnconditionalBranch(
05359     MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
05360   Branch.setOpcode(X86::JMP_4);
05361   Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
05362 }
05363 
05364 void X86InstrInfo::getTrap(MCInst &MI) const {
05365   MI.setOpcode(X86::TRAP);
05366 }
05367 
05368 bool X86InstrInfo::isHighLatencyDef(int opc) const {
05369   switch (opc) {
05370   default: return false;
05371   case X86::DIVSDrm:
05372   case X86::DIVSDrm_Int:
05373   case X86::DIVSDrr:
05374   case X86::DIVSDrr_Int:
05375   case X86::DIVSSrm:
05376   case X86::DIVSSrm_Int:
05377   case X86::DIVSSrr:
05378   case X86::DIVSSrr_Int:
05379   case X86::SQRTPDm:
05380   case X86::SQRTPDr:
05381   case X86::SQRTPSm:
05382   case X86::SQRTPSr:
05383   case X86::SQRTSDm:
05384   case X86::SQRTSDm_Int:
05385   case X86::SQRTSDr:
05386   case X86::SQRTSDr_Int:
05387   case X86::SQRTSSm:
05388   case X86::SQRTSSm_Int:
05389   case X86::SQRTSSr:
05390   case X86::SQRTSSr_Int:
05391   // AVX instructions with high latency
05392   case X86::VDIVSDrm:
05393   case X86::VDIVSDrm_Int:
05394   case X86::VDIVSDrr:
05395   case X86::VDIVSDrr_Int:
05396   case X86::VDIVSSrm:
05397   case X86::VDIVSSrm_Int:
05398   case X86::VDIVSSrr:
05399   case X86::VDIVSSrr_Int:
05400   case X86::VSQRTPDm:
05401   case X86::VSQRTPDr:
05402   case X86::VSQRTPSm:
05403   case X86::VSQRTPSr:
05404   case X86::VSQRTSDm:
05405   case X86::VSQRTSDm_Int:
05406   case X86::VSQRTSDr:
05407   case X86::VSQRTSSm:
05408   case X86::VSQRTSSm_Int:
05409   case X86::VSQRTSSr:
05410   case X86::VSQRTPDZrm:
05411   case X86::VSQRTPDZrr:
05412   case X86::VSQRTPSZrm:
05413   case X86::VSQRTPSZrr:
05414   case X86::VSQRTSDZm:
05415   case X86::VSQRTSDZm_Int:
05416   case X86::VSQRTSDZr:
05417   case X86::VSQRTSSZm_Int:
05418   case X86::VSQRTSSZr:
05419   case X86::VSQRTSSZm:
05420   case X86::VDIVSDZrm:
05421   case X86::VDIVSDZrr:
05422   case X86::VDIVSSZrm:
05423   case X86::VDIVSSZrr:
05424 
05425   case X86::VGATHERQPSZrm:
05426   case X86::VGATHERQPDZrm:
05427   case X86::VGATHERDPDZrm:
05428   case X86::VGATHERDPSZrm:
05429   case X86::VPGATHERQDZrm:
05430   case X86::VPGATHERQQZrm:
05431   case X86::VPGATHERDDZrm:
05432   case X86::VPGATHERDQZrm:
05433   case X86::VSCATTERQPDZmr:
05434   case X86::VSCATTERQPSZmr:
05435   case X86::VSCATTERDPDZmr:
05436   case X86::VSCATTERDPSZmr:
05437   case X86::VPSCATTERQDZmr:
05438   case X86::VPSCATTERQQZmr:
05439   case X86::VPSCATTERDDZmr:
05440   case X86::VPSCATTERDQZmr:
05441     return true;
05442   }
05443 }
05444 
05445 bool X86InstrInfo::
05446 hasHighOperandLatency(const InstrItineraryData *ItinData,
05447                       const MachineRegisterInfo *MRI,
05448                       const MachineInstr *DefMI, unsigned DefIdx,
05449                       const MachineInstr *UseMI, unsigned UseIdx) const {
05450   return isHighLatencyDef(DefMI->getOpcode());
05451 }
05452 
05453 namespace {
05454   /// CGBR - Create Global Base Reg pass. This initializes the PIC
05455   /// global base register for x86-32.
05456   struct CGBR : public MachineFunctionPass {
05457     static char ID;
05458     CGBR() : MachineFunctionPass(ID) {}
05459 
05460     bool runOnMachineFunction(MachineFunction &MF) override {
05461       const X86TargetMachine *TM =
05462         static_cast<const X86TargetMachine *>(&MF.getTarget());
05463 
05464       // Don't do anything if this is 64-bit as 64-bit PIC
05465       // uses RIP relative addressing.
05466       if (TM->getSubtarget<X86Subtarget>().is64Bit())
05467         return false;
05468 
05469       // Only emit a global base reg in PIC mode.
05470       if (TM->getRelocationModel() != Reloc::PIC_)
05471         return false;
05472 
05473       X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
05474       unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
05475 
05476       // If we didn't need a GlobalBaseReg, don't insert code.
05477       if (GlobalBaseReg == 0)
05478         return false;
05479 
05480       // Insert the code that sets GlobalBaseReg into the first MBB of the function.
05481       MachineBasicBlock &FirstMBB = MF.front();
05482       MachineBasicBlock::iterator MBBI = FirstMBB.begin();
05483       DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
05484       MachineRegisterInfo &RegInfo = MF.getRegInfo();
05485       const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
05486 
05487       unsigned PC;
05488       if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
05489         PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
05490       else
05491         PC = GlobalBaseReg;
05492 
05493       // The operand of MovePCtoStack is completely ignored by the asm printer.
05494       // It is only used in JIT code emission as a displacement to the PC.
05495       BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
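            // MOVPC32r is a pseudo that loads the address of the next instruction
            // into the destination register (typically by expanding to a call to
            // the next address followed by a pop).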
05496 
05497       // If we're using vanilla 'GOT' PIC style, we should use relative addressing
05498       // not to the PC, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
05499       if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
05500         // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
05501         BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
05502           .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
05503                                         X86II::MO_GOT_ABSOLUTE_ADDRESS);
05504       }
05505 
05506       return true;
05507     }
05508 
05509     const char *getPassName() const override {
05510       return "X86 PIC Global Base Reg Initialization";
05511     }
05512 
05513     void getAnalysisUsage(AnalysisUsage &AU) const override {
05514       AU.setPreservesCFG();
05515       MachineFunctionPass::getAnalysisUsage(AU);
05516     }
05517   };
05518 }
05519 
05520 char CGBR::ID = 0;
05521 FunctionPass*
05522 llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
05523 
05524 namespace {
05525   struct LDTLSCleanup : public MachineFunctionPass {
05526     static char ID;
05527     LDTLSCleanup() : MachineFunctionPass(ID) {}
05528 
05529     bool runOnMachineFunction(MachineFunction &MF) override {
05530       X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
05531       if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
05532         // There is no point in folding accesses unless there are at least two.
05533         return false;
05534       }
05535 
05536       MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
05537       return VisitNode(DT->getRootNode(), 0);
05538     }
05539 
05540     // Visit the dominator subtree rooted at Node in pre-order.
05541     // If TLSBaseAddrReg is non-zero, then use that to replace any
05542     // TLS_base_addr instructions. Otherwise, create the register
05543     // when the first such instruction is seen, and then use it
05544     // as we encounter more instructions.
05545     bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
05546       MachineBasicBlock *BB = Node->getBlock();
05547       bool Changed = false;
05548 
05549       // Traverse the current block.
05550       for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
05551            ++I) {
05552         switch (I->getOpcode()) {
05553           case X86::TLS_base_addr32:
05554           case X86::TLS_base_addr64:
05555             if (TLSBaseAddrReg)
05556               I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
05557             else
05558               I = SetRegister(I, &TLSBaseAddrReg);
05559             Changed = true;
05560             break;
05561           default:
05562             break;
05563         }
05564       }
05565 
05566       // Visit the children of this block in the dominator tree.
05567       for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
05568            I != E; ++I) {
05569         Changed |= VisitNode(*I, TLSBaseAddrReg);
05570       }
05571 
05572       return Changed;
05573     }
05574 
05575     // Replace the TLS_base_addr instruction I with a copy from
05576     // TLSBaseAddrReg, returning the new instruction.
05577     MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
05578                                          unsigned TLSBaseAddrReg) {
05579       MachineFunction *MF = I->getParent()->getParent();
05580       const X86TargetMachine *TM =
05581           static_cast<const X86TargetMachine *>(&MF->getTarget());
05582       const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
05583       const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
05584 
05585       // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
05586       MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
05587                                    TII->get(TargetOpcode::COPY),
05588                                    is64Bit ? X86::RAX : X86::EAX)
05589                                    .addReg(TLSBaseAddrReg);
05590 
05591       // Erase the TLS_base_addr instruction.
05592       I->eraseFromParent();
05593 
05594       return Copy;
05595     }
05596 
05597     // Create a virtual register in *TLSBaseAddrReg, and populate it by
05598     // inserting a copy instruction after I. Returns the new instruction.
05599     MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
05600       MachineFunction *MF = I->getParent()->getParent();
05601       const X86TargetMachine *TM =
05602           static_cast<const X86TargetMachine *>(&MF->getTarget());
05603       const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
05604       const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
05605 
05606       // Create a virtual register for the TLS base address.
05607       MachineRegisterInfo &RegInfo = MF->getRegInfo();
05608       *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
05609                                                       ? &X86::GR64RegClass
05610                                                       : &X86::GR32RegClass);
05611 
05612       // Insert a copy from RAX/EAX to TLSBaseAddrReg.
05613       MachineInstr *Next = I->getNextNode();
05614       MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
05615                                    TII->get(TargetOpcode::COPY),
05616                                    *TLSBaseAddrReg)
05617                                    .addReg(is64Bit ? X86::RAX : X86::EAX);
05618 
05619       return Copy;
05620     }
05621 
05622     const char *getPassName() const override {
05623       return "Local Dynamic TLS Access Clean-up";
05624     }
05625 
05626     void getAnalysisUsage(AnalysisUsage &AU) const override {
05627       AU.setPreservesCFG();
05628       AU.addRequired<MachineDominatorTree>();
05629       MachineFunctionPass::getAnalysisUsage(AU);
05630     }
05631   };
05632 }
05633 
05634 char LDTLSCleanup::ID = 0;
05635 FunctionPass*
05636 llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }