// LLVM API Documentation (doxygen page header; not part of the original source)
00001 //===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===// 00002 // 00003 // The LLVM Compiler Infrastructure 00004 // 00005 // This file is distributed under the University of Illinois Open Source 00006 // License. See LICENSE.TXT for details. 00007 // 00008 //===----------------------------------------------------------------------===// 00009 00010 #include "MCTargetDesc/X86BaseInfo.h" 00011 #include "MCTargetDesc/X86FixupKinds.h" 00012 #include "llvm/ADT/StringSwitch.h" 00013 #include "llvm/MC/MCAsmBackend.h" 00014 #include "llvm/MC/MCELFObjectWriter.h" 00015 #include "llvm/MC/MCExpr.h" 00016 #include "llvm/MC/MCFixupKindInfo.h" 00017 #include "llvm/MC/MCMachObjectWriter.h" 00018 #include "llvm/MC/MCObjectWriter.h" 00019 #include "llvm/MC/MCSectionCOFF.h" 00020 #include "llvm/MC/MCSectionELF.h" 00021 #include "llvm/MC/MCSectionMachO.h" 00022 #include "llvm/Support/CommandLine.h" 00023 #include "llvm/Support/ELF.h" 00024 #include "llvm/Support/ErrorHandling.h" 00025 #include "llvm/Support/MachO.h" 00026 #include "llvm/Support/TargetRegistry.h" 00027 #include "llvm/Support/raw_ostream.h" 00028 using namespace llvm; 00029 00030 // Option to allow disabling arithmetic relaxation to workaround PR9807, which 00031 // is useful when running bitwise comparison experiments on Darwin. We should be 00032 // able to remove this once PR9807 is resolved. 
00033 static cl::opt<bool> 00034 MCDisableArithRelaxation("mc-x86-disable-arith-relaxation", 00035 cl::desc("Disable relaxation of arithmetic instruction for X86")); 00036 00037 static unsigned getFixupKindLog2Size(unsigned Kind) { 00038 switch (Kind) { 00039 default: 00040 llvm_unreachable("invalid fixup kind!"); 00041 case FK_PCRel_1: 00042 case FK_SecRel_1: 00043 case FK_Data_1: 00044 return 0; 00045 case FK_PCRel_2: 00046 case FK_SecRel_2: 00047 case FK_Data_2: 00048 return 1; 00049 case FK_PCRel_4: 00050 case X86::reloc_riprel_4byte: 00051 case X86::reloc_riprel_4byte_movq_load: 00052 case X86::reloc_signed_4byte: 00053 case X86::reloc_global_offset_table: 00054 case FK_SecRel_4: 00055 case FK_Data_4: 00056 return 2; 00057 case FK_PCRel_8: 00058 case FK_SecRel_8: 00059 case FK_Data_8: 00060 case X86::reloc_global_offset_table8: 00061 return 3; 00062 } 00063 } 00064 00065 namespace { 00066 00067 class X86ELFObjectWriter : public MCELFObjectTargetWriter { 00068 public: 00069 X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine, 00070 bool HasRelocationAddend, bool foobar) 00071 : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {} 00072 }; 00073 00074 class X86AsmBackend : public MCAsmBackend { 00075 const StringRef CPU; 00076 bool HasNopl; 00077 const uint64_t MaxNopLength; 00078 public: 00079 X86AsmBackend(const Target &T, StringRef _CPU) 00080 : MCAsmBackend(), CPU(_CPU), MaxNopLength(_CPU == "slm" ? 
7 : 15) { 00081 HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" && 00082 CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" && 00083 CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" && 00084 CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" && 00085 CPU != "c3" && CPU != "c3-2"; 00086 } 00087 00088 unsigned getNumFixupKinds() const override { 00089 return X86::NumTargetFixupKinds; 00090 } 00091 00092 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override { 00093 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = { 00094 { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel }, 00095 { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel}, 00096 { "reloc_signed_4byte", 0, 4 * 8, 0}, 00097 { "reloc_global_offset_table", 0, 4 * 8, 0} 00098 }; 00099 00100 if (Kind < FirstTargetFixupKind) 00101 return MCAsmBackend::getFixupKindInfo(Kind); 00102 00103 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && 00104 "Invalid kind!"); 00105 return Infos[Kind - FirstTargetFixupKind]; 00106 } 00107 00108 void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, 00109 uint64_t Value, bool IsPCRel) const override { 00110 unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind()); 00111 00112 assert(Fixup.getOffset() + Size <= DataSize && 00113 "Invalid fixup offset!"); 00114 00115 // Check that uppper bits are either all zeros or all ones. 00116 // Specifically ignore overflow/underflow as long as the leakage is 00117 // limited to the lower bits. This is to remain compatible with 00118 // other assemblers. 
00119 assert(isIntN(Size * 8 + 1, Value) && 00120 "Value does not fit in the Fixup field"); 00121 00122 for (unsigned i = 0; i != Size; ++i) 00123 Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8)); 00124 } 00125 00126 bool mayNeedRelaxation(const MCInst &Inst) const override; 00127 00128 bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, 00129 const MCRelaxableFragment *DF, 00130 const MCAsmLayout &Layout) const override; 00131 00132 void relaxInstruction(const MCInst &Inst, MCInst &Res) const override; 00133 00134 bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override; 00135 }; 00136 } // end anonymous namespace 00137 00138 static unsigned getRelaxedOpcodeBranch(unsigned Op) { 00139 switch (Op) { 00140 default: 00141 return Op; 00142 00143 case X86::JAE_1: return X86::JAE_4; 00144 case X86::JA_1: return X86::JA_4; 00145 case X86::JBE_1: return X86::JBE_4; 00146 case X86::JB_1: return X86::JB_4; 00147 case X86::JE_1: return X86::JE_4; 00148 case X86::JGE_1: return X86::JGE_4; 00149 case X86::JG_1: return X86::JG_4; 00150 case X86::JLE_1: return X86::JLE_4; 00151 case X86::JL_1: return X86::JL_4; 00152 case X86::JMP_1: return X86::JMP_4; 00153 case X86::JNE_1: return X86::JNE_4; 00154 case X86::JNO_1: return X86::JNO_4; 00155 case X86::JNP_1: return X86::JNP_4; 00156 case X86::JNS_1: return X86::JNS_4; 00157 case X86::JO_1: return X86::JO_4; 00158 case X86::JP_1: return X86::JP_4; 00159 case X86::JS_1: return X86::JS_4; 00160 } 00161 } 00162 00163 static unsigned getRelaxedOpcodeArith(unsigned Op) { 00164 switch (Op) { 00165 default: 00166 return Op; 00167 00168 // IMUL 00169 case X86::IMUL16rri8: return X86::IMUL16rri; 00170 case X86::IMUL16rmi8: return X86::IMUL16rmi; 00171 case X86::IMUL32rri8: return X86::IMUL32rri; 00172 case X86::IMUL32rmi8: return X86::IMUL32rmi; 00173 case X86::IMUL64rri8: return X86::IMUL64rri32; 00174 case X86::IMUL64rmi8: return X86::IMUL64rmi32; 00175 00176 // AND 00177 case X86::AND16ri8: return 
X86::AND16ri; 00178 case X86::AND16mi8: return X86::AND16mi; 00179 case X86::AND32ri8: return X86::AND32ri; 00180 case X86::AND32mi8: return X86::AND32mi; 00181 case X86::AND64ri8: return X86::AND64ri32; 00182 case X86::AND64mi8: return X86::AND64mi32; 00183 00184 // OR 00185 case X86::OR16ri8: return X86::OR16ri; 00186 case X86::OR16mi8: return X86::OR16mi; 00187 case X86::OR32ri8: return X86::OR32ri; 00188 case X86::OR32mi8: return X86::OR32mi; 00189 case X86::OR64ri8: return X86::OR64ri32; 00190 case X86::OR64mi8: return X86::OR64mi32; 00191 00192 // XOR 00193 case X86::XOR16ri8: return X86::XOR16ri; 00194 case X86::XOR16mi8: return X86::XOR16mi; 00195 case X86::XOR32ri8: return X86::XOR32ri; 00196 case X86::XOR32mi8: return X86::XOR32mi; 00197 case X86::XOR64ri8: return X86::XOR64ri32; 00198 case X86::XOR64mi8: return X86::XOR64mi32; 00199 00200 // ADD 00201 case X86::ADD16ri8: return X86::ADD16ri; 00202 case X86::ADD16mi8: return X86::ADD16mi; 00203 case X86::ADD32ri8: return X86::ADD32ri; 00204 case X86::ADD32mi8: return X86::ADD32mi; 00205 case X86::ADD64ri8: return X86::ADD64ri32; 00206 case X86::ADD64mi8: return X86::ADD64mi32; 00207 00208 // SUB 00209 case X86::SUB16ri8: return X86::SUB16ri; 00210 case X86::SUB16mi8: return X86::SUB16mi; 00211 case X86::SUB32ri8: return X86::SUB32ri; 00212 case X86::SUB32mi8: return X86::SUB32mi; 00213 case X86::SUB64ri8: return X86::SUB64ri32; 00214 case X86::SUB64mi8: return X86::SUB64mi32; 00215 00216 // CMP 00217 case X86::CMP16ri8: return X86::CMP16ri; 00218 case X86::CMP16mi8: return X86::CMP16mi; 00219 case X86::CMP32ri8: return X86::CMP32ri; 00220 case X86::CMP32mi8: return X86::CMP32mi; 00221 case X86::CMP64ri8: return X86::CMP64ri32; 00222 case X86::CMP64mi8: return X86::CMP64mi32; 00223 00224 // PUSH 00225 case X86::PUSH32i8: return X86::PUSHi32; 00226 case X86::PUSH16i8: return X86::PUSHi16; 00227 case X86::PUSH64i8: return X86::PUSH64i32; 00228 case X86::PUSH64i16: return X86::PUSH64i32; 00229 } 00230 } 00231 
00232 static unsigned getRelaxedOpcode(unsigned Op) { 00233 unsigned R = getRelaxedOpcodeArith(Op); 00234 if (R != Op) 00235 return R; 00236 return getRelaxedOpcodeBranch(Op); 00237 } 00238 00239 bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const { 00240 // Branches can always be relaxed. 00241 if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode()) 00242 return true; 00243 00244 if (MCDisableArithRelaxation) 00245 return false; 00246 00247 // Check if this instruction is ever relaxable. 00248 if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode()) 00249 return false; 00250 00251 00252 // Check if it has an expression and is not RIP relative. 00253 bool hasExp = false; 00254 bool hasRIP = false; 00255 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) { 00256 const MCOperand &Op = Inst.getOperand(i); 00257 if (Op.isExpr()) 00258 hasExp = true; 00259 00260 if (Op.isReg() && Op.getReg() == X86::RIP) 00261 hasRIP = true; 00262 } 00263 00264 // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on 00265 // how we do relaxations? 00266 return hasExp && !hasRIP; 00267 } 00268 00269 bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, 00270 uint64_t Value, 00271 const MCRelaxableFragment *DF, 00272 const MCAsmLayout &Layout) const { 00273 // Relax if the value is too big for a (signed) i8. 00274 return int64_t(Value) != int64_t(int8_t(Value)); 00275 } 00276 00277 // FIXME: Can tblgen help at all here to verify there aren't other instructions 00278 // we can relax? 00279 void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const { 00280 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel. 
00281 unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode()); 00282 00283 if (RelaxedOp == Inst.getOpcode()) { 00284 SmallString<256> Tmp; 00285 raw_svector_ostream OS(Tmp); 00286 Inst.dump_pretty(OS); 00287 OS << "\n"; 00288 report_fatal_error("unexpected instruction to relax: " + OS.str()); 00289 } 00290 00291 Res = Inst; 00292 Res.setOpcode(RelaxedOp); 00293 } 00294 00295 /// \brief Write a sequence of optimal nops to the output, covering \p Count 00296 /// bytes. 00297 /// \return - true on success, false on failure 00298 bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { 00299 static const uint8_t Nops[10][10] = { 00300 // nop 00301 {0x90}, 00302 // xchg %ax,%ax 00303 {0x66, 0x90}, 00304 // nopl (%[re]ax) 00305 {0x0f, 0x1f, 0x00}, 00306 // nopl 0(%[re]ax) 00307 {0x0f, 0x1f, 0x40, 0x00}, 00308 // nopl 0(%[re]ax,%[re]ax,1) 00309 {0x0f, 0x1f, 0x44, 0x00, 0x00}, 00310 // nopw 0(%[re]ax,%[re]ax,1) 00311 {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00}, 00312 // nopl 0L(%[re]ax) 00313 {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00}, 00314 // nopl 0L(%[re]ax,%[re]ax,1) 00315 {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 00316 // nopw 0L(%[re]ax,%[re]ax,1) 00317 {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 00318 // nopw %cs:0L(%[re]ax,%[re]ax,1) 00319 {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 00320 }; 00321 00322 // This CPU doesn't support long nops. If needed add more. 00323 // FIXME: Can we get this from the subtarget somehow? 00324 // FIXME: We could generated something better than plain 0x90. 00325 if (!HasNopl) { 00326 for (uint64_t i = 0; i < Count; ++i) 00327 OW->Write8(0x90); 00328 return true; 00329 } 00330 00331 // 15 is the longest single nop instruction. Emit as many 15-byte nops as 00332 // needed, then emit a nop of the remaining length. 00333 do { 00334 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength); 00335 const uint8_t Prefixes = ThisNopLength <= 10 ? 
0 : ThisNopLength - 10; 00336 for (uint8_t i = 0; i < Prefixes; i++) 00337 OW->Write8(0x66); 00338 const uint8_t Rest = ThisNopLength - Prefixes; 00339 for (uint8_t i = 0; i < Rest; i++) 00340 OW->Write8(Nops[Rest - 1][i]); 00341 Count -= ThisNopLength; 00342 } while (Count != 0); 00343 00344 return true; 00345 } 00346 00347 /* *** */ 00348 00349 namespace { 00350 00351 class ELFX86AsmBackend : public X86AsmBackend { 00352 public: 00353 uint8_t OSABI; 00354 ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU) 00355 : X86AsmBackend(T, CPU), OSABI(_OSABI) {} 00356 }; 00357 00358 class ELFX86_32AsmBackend : public ELFX86AsmBackend { 00359 public: 00360 ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) 00361 : ELFX86AsmBackend(T, OSABI, CPU) {} 00362 00363 MCObjectWriter *createObjectWriter(raw_ostream &OS) const override { 00364 return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386); 00365 } 00366 }; 00367 00368 class ELFX86_X32AsmBackend : public ELFX86AsmBackend { 00369 public: 00370 ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) 00371 : ELFX86AsmBackend(T, OSABI, CPU) {} 00372 00373 MCObjectWriter *createObjectWriter(raw_ostream &OS) const override { 00374 return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, 00375 ELF::EM_X86_64); 00376 } 00377 }; 00378 00379 class ELFX86_64AsmBackend : public ELFX86AsmBackend { 00380 public: 00381 ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) 00382 : ELFX86AsmBackend(T, OSABI, CPU) {} 00383 00384 MCObjectWriter *createObjectWriter(raw_ostream &OS) const override { 00385 return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64); 00386 } 00387 }; 00388 00389 class WindowsX86AsmBackend : public X86AsmBackend { 00390 bool Is64Bit; 00391 00392 public: 00393 WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU) 00394 : X86AsmBackend(T, CPU) 00395 , Is64Bit(is64Bit) { 00396 } 00397 00398 MCObjectWriter 
*createObjectWriter(raw_ostream &OS) const override { 00399 return createX86WinCOFFObjectWriter(OS, Is64Bit); 00400 } 00401 }; 00402 00403 namespace CU { 00404 00405 /// Compact unwind encoding values. 00406 enum CompactUnwindEncodings { 00407 /// [RE]BP based frame where [RE]BP is pused on the stack immediately after 00408 /// the return address, then [RE]SP is moved to [RE]BP. 00409 UNWIND_MODE_BP_FRAME = 0x01000000, 00410 00411 /// A frameless function with a small constant stack size. 00412 UNWIND_MODE_STACK_IMMD = 0x02000000, 00413 00414 /// A frameless function with a large constant stack size. 00415 UNWIND_MODE_STACK_IND = 0x03000000, 00416 00417 /// No compact unwind encoding is available. 00418 UNWIND_MODE_DWARF = 0x04000000, 00419 00420 /// Mask for encoding the frame registers. 00421 UNWIND_BP_FRAME_REGISTERS = 0x00007FFF, 00422 00423 /// Mask for encoding the frameless registers. 00424 UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF 00425 }; 00426 00427 } // end CU namespace 00428 00429 class DarwinX86AsmBackend : public X86AsmBackend { 00430 const MCRegisterInfo &MRI; 00431 00432 /// \brief Number of registers that can be saved in a compact unwind encoding. 00433 enum { CU_NUM_SAVED_REGS = 6 }; 00434 00435 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS]; 00436 bool Is64Bit; 00437 00438 unsigned OffsetSize; ///< Offset of a "push" instruction. 00439 unsigned MoveInstrSize; ///< Size of a "move" instruction. 00440 unsigned StackDivide; ///< Amount to adjust stack size by. 00441 protected: 00442 /// \brief Size of a "push" instruction for the given register. 
00443 unsigned PushInstrSize(unsigned Reg) const { 00444 switch (Reg) { 00445 case X86::EBX: 00446 case X86::ECX: 00447 case X86::EDX: 00448 case X86::EDI: 00449 case X86::ESI: 00450 case X86::EBP: 00451 case X86::RBX: 00452 case X86::RBP: 00453 return 1; 00454 case X86::R12: 00455 case X86::R13: 00456 case X86::R14: 00457 case X86::R15: 00458 return 2; 00459 } 00460 return 1; 00461 } 00462 00463 /// \brief Implementation of algorithm to generate the compact unwind encoding 00464 /// for the CFI instructions. 00465 uint32_t 00466 generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const { 00467 if (Instrs.empty()) return 0; 00468 00469 // Reset the saved registers. 00470 unsigned SavedRegIdx = 0; 00471 memset(SavedRegs, 0, sizeof(SavedRegs)); 00472 00473 bool HasFP = false; 00474 00475 // Encode that we are using EBP/RBP as the frame pointer. 00476 uint32_t CompactUnwindEncoding = 0; 00477 00478 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2; 00479 unsigned InstrOffset = 0; 00480 unsigned StackAdjust = 0; 00481 unsigned StackSize = 0; 00482 unsigned PrevStackSize = 0; 00483 unsigned NumDefCFAOffsets = 0; 00484 00485 for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { 00486 const MCCFIInstruction &Inst = Instrs[i]; 00487 00488 switch (Inst.getOperation()) { 00489 default: 00490 // Any other CFI directives indicate a frame that we aren't prepared 00491 // to represent via compact unwind, so just bail out. 00492 return 0; 00493 case MCCFIInstruction::OpDefCfaRegister: { 00494 // Defines a frame pointer. E.g. 00495 // 00496 // movq %rsp, %rbp 00497 // L0: 00498 // .cfi_def_cfa_register %rbp 00499 // 00500 HasFP = true; 00501 assert(MRI.getLLVMRegNum(Inst.getRegister(), true) == 00502 (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!"); 00503 00504 // Reset the counts. 
00505 memset(SavedRegs, 0, sizeof(SavedRegs)); 00506 StackAdjust = 0; 00507 SavedRegIdx = 0; 00508 InstrOffset += MoveInstrSize; 00509 break; 00510 } 00511 case MCCFIInstruction::OpDefCfaOffset: { 00512 // Defines a new offset for the CFA. E.g. 00513 // 00514 // With frame: 00515 // 00516 // pushq %rbp 00517 // L0: 00518 // .cfi_def_cfa_offset 16 00519 // 00520 // Without frame: 00521 // 00522 // subq $72, %rsp 00523 // L0: 00524 // .cfi_def_cfa_offset 80 00525 // 00526 PrevStackSize = StackSize; 00527 StackSize = std::abs(Inst.getOffset()) / StackDivide; 00528 ++NumDefCFAOffsets; 00529 break; 00530 } 00531 case MCCFIInstruction::OpOffset: { 00532 // Defines a "push" of a callee-saved register. E.g. 00533 // 00534 // pushq %r15 00535 // pushq %r14 00536 // pushq %rbx 00537 // L0: 00538 // subq $120, %rsp 00539 // L1: 00540 // .cfi_offset %rbx, -40 00541 // .cfi_offset %r14, -32 00542 // .cfi_offset %r15, -24 00543 // 00544 if (SavedRegIdx == CU_NUM_SAVED_REGS) 00545 // If there are too many saved registers, we cannot use a compact 00546 // unwind encoding. 00547 return CU::UNWIND_MODE_DWARF; 00548 00549 unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true); 00550 SavedRegs[SavedRegIdx++] = Reg; 00551 StackAdjust += OffsetSize; 00552 InstrOffset += PushInstrSize(Reg); 00553 break; 00554 } 00555 } 00556 } 00557 00558 StackAdjust /= StackDivide; 00559 00560 if (HasFP) { 00561 if ((StackAdjust & 0xFF) != StackAdjust) 00562 // Offset was too big for a compact unwind encoding. 00563 return CU::UNWIND_MODE_DWARF; 00564 00565 // Get the encoding of the saved registers when we have a frame pointer. 
00566 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(); 00567 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; 00568 00569 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME; 00570 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16; 00571 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS; 00572 } else { 00573 // If the amount of the stack allocation is the size of a register, then 00574 // we "push" the RAX/EAX register onto the stack instead of adjusting the 00575 // stack pointer with a SUB instruction. We don't support the push of the 00576 // RAX/EAX register with compact unwind. So we check for that situation 00577 // here. 00578 if ((NumDefCFAOffsets == SavedRegIdx + 1 && 00579 StackSize - PrevStackSize == 1) || 00580 (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2)) 00581 return CU::UNWIND_MODE_DWARF; 00582 00583 SubtractInstrIdx += InstrOffset; 00584 ++StackAdjust; 00585 00586 if ((StackSize & 0xFF) == StackSize) { 00587 // Frameless stack with a small stack size. 00588 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD; 00589 00590 // Encode the stack size. 00591 CompactUnwindEncoding |= (StackSize & 0xFF) << 16; 00592 } else { 00593 if ((StackAdjust & 0x7) != StackAdjust) 00594 // The extra stack adjustments are too big for us to handle. 00595 return CU::UNWIND_MODE_DWARF; 00596 00597 // Frameless stack with an offset too large for us to encode compactly. 00598 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND; 00599 00600 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP' 00601 // instruction. 00602 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16; 00603 00604 // Encode any extra stack stack adjustments (done via push 00605 // instructions). 00606 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13; 00607 } 00608 00609 // Encode the number of registers saved. (Reverse the list first.) 
00610 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]); 00611 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10; 00612 00613 // Get the encoding of the saved registers when we don't have a frame 00614 // pointer. 00615 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx); 00616 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; 00617 00618 // Encode the register encoding. 00619 CompactUnwindEncoding |= 00620 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION; 00621 } 00622 00623 return CompactUnwindEncoding; 00624 } 00625 00626 private: 00627 /// \brief Get the compact unwind number for a given register. The number 00628 /// corresponds to the enum lists in compact_unwind_encoding.h. 00629 int getCompactUnwindRegNum(unsigned Reg) const { 00630 static const uint16_t CU32BitRegs[7] = { 00631 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 00632 }; 00633 static const uint16_t CU64BitRegs[] = { 00634 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 00635 }; 00636 const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs; 00637 for (int Idx = 1; *CURegs; ++CURegs, ++Idx) 00638 if (*CURegs == Reg) 00639 return Idx; 00640 00641 return -1; 00642 } 00643 00644 /// \brief Return the registers encoded for a compact encoding with a frame 00645 /// pointer. 00646 uint32_t encodeCompactUnwindRegistersWithFrame() const { 00647 // Encode the registers in the order they were saved --- 3-bits per 00648 // register. The list of saved registers is assumed to be in reverse 00649 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS. 00650 uint32_t RegEnc = 0; 00651 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) { 00652 unsigned Reg = SavedRegs[i]; 00653 if (Reg == 0) break; 00654 00655 int CURegNum = getCompactUnwindRegNum(Reg); 00656 if (CURegNum == -1) return ~0U; 00657 00658 // Encode the 3-bit register number in order, skipping over 3-bits for 00659 // each register. 
00660 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3); 00661 } 00662 00663 assert((RegEnc & 0x3FFFF) == RegEnc && 00664 "Invalid compact register encoding!"); 00665 return RegEnc; 00666 } 00667 00668 /// \brief Create the permutation encoding used with frameless stacks. It is 00669 /// passed the number of registers to be saved and an array of the registers 00670 /// saved. 00671 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const { 00672 // The saved registers are numbered from 1 to 6. In order to encode the 00673 // order in which they were saved, we re-number them according to their 00674 // place in the register order. The re-numbering is relative to the last 00675 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in 00676 // that order: 00677 // 00678 // Orig Re-Num 00679 // ---- ------ 00680 // 6 6 00681 // 2 2 00682 // 4 3 00683 // 5 3 00684 // 00685 for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) { 00686 int CUReg = getCompactUnwindRegNum(SavedRegs[i]); 00687 if (CUReg == -1) return ~0U; 00688 SavedRegs[i] = CUReg; 00689 } 00690 00691 // Reverse the list. 00692 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]); 00693 00694 uint32_t RenumRegs[CU_NUM_SAVED_REGS]; 00695 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){ 00696 unsigned Countless = 0; 00697 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j) 00698 if (SavedRegs[j] < SavedRegs[i]) 00699 ++Countless; 00700 00701 RenumRegs[i] = SavedRegs[i] - Countless - 1; 00702 } 00703 00704 // Take the renumbered values and encode them into a 10-bit number. 
00705 uint32_t permutationEncoding = 0; 00706 switch (RegCount) { 00707 case 6: 00708 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1] 00709 + 6 * RenumRegs[2] + 2 * RenumRegs[3] 00710 + RenumRegs[4]; 00711 break; 00712 case 5: 00713 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2] 00714 + 6 * RenumRegs[3] + 2 * RenumRegs[4] 00715 + RenumRegs[5]; 00716 break; 00717 case 4: 00718 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3] 00719 + 3 * RenumRegs[4] + RenumRegs[5]; 00720 break; 00721 case 3: 00722 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4] 00723 + RenumRegs[5]; 00724 break; 00725 case 2: 00726 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5]; 00727 break; 00728 case 1: 00729 permutationEncoding |= RenumRegs[5]; 00730 break; 00731 } 00732 00733 assert((permutationEncoding & 0x3FF) == permutationEncoding && 00734 "Invalid compact register encoding!"); 00735 return permutationEncoding; 00736 } 00737 00738 public: 00739 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU, 00740 bool Is64Bit) 00741 : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) { 00742 memset(SavedRegs, 0, sizeof(SavedRegs)); 00743 OffsetSize = Is64Bit ? 8 : 4; 00744 MoveInstrSize = Is64Bit ? 3 : 2; 00745 StackDivide = Is64Bit ? 8 : 4; 00746 } 00747 }; 00748 00749 class DarwinX86_32AsmBackend : public DarwinX86AsmBackend { 00750 public: 00751 DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI, 00752 StringRef CPU) 00753 : DarwinX86AsmBackend(T, MRI, CPU, false) {} 00754 00755 MCObjectWriter *createObjectWriter(raw_ostream &OS) const override { 00756 return createX86MachObjectWriter(OS, /*Is64Bit=*/false, 00757 MachO::CPU_TYPE_I386, 00758 MachO::CPU_SUBTYPE_I386_ALL); 00759 } 00760 00761 /// \brief Generate the compact unwind encoding for the CFI instructions. 
00762 uint32_t generateCompactUnwindEncoding( 00763 ArrayRef<MCCFIInstruction> Instrs) const override { 00764 return generateCompactUnwindEncodingImpl(Instrs); 00765 } 00766 }; 00767 00768 class DarwinX86_64AsmBackend : public DarwinX86AsmBackend { 00769 const MachO::CPUSubTypeX86 Subtype; 00770 public: 00771 DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI, 00772 StringRef CPU, MachO::CPUSubTypeX86 st) 00773 : DarwinX86AsmBackend(T, MRI, CPU, true), Subtype(st) {} 00774 00775 MCObjectWriter *createObjectWriter(raw_ostream &OS) const override { 00776 return createX86MachObjectWriter(OS, /*Is64Bit=*/true, 00777 MachO::CPU_TYPE_X86_64, Subtype); 00778 } 00779 00780 bool doesSectionRequireSymbols(const MCSection &Section) const override { 00781 // Temporary labels in the string literals sections require symbols. The 00782 // issue is that the x86_64 relocation format does not allow symbol + 00783 // offset, and so the linker does not have enough information to resolve the 00784 // access to the appropriate atom unless an external relocation is used. For 00785 // non-cstring sections, we expect the compiler to use a non-temporary label 00786 // for anything that could have an addend pointing outside the symbol. 00787 // 00788 // See <rdar://problem/4765733>. 00789 const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section); 00790 return SMO.getType() == MachO::S_CSTRING_LITERALS; 00791 } 00792 00793 bool isSectionAtomizable(const MCSection &Section) const override { 00794 const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section); 00795 // Fixed sized data sections are uniqued, they cannot be diced into atoms. 
00796 switch (SMO.getType()) { 00797 default: 00798 return true; 00799 00800 case MachO::S_4BYTE_LITERALS: 00801 case MachO::S_8BYTE_LITERALS: 00802 case MachO::S_16BYTE_LITERALS: 00803 case MachO::S_LITERAL_POINTERS: 00804 case MachO::S_NON_LAZY_SYMBOL_POINTERS: 00805 case MachO::S_LAZY_SYMBOL_POINTERS: 00806 case MachO::S_MOD_INIT_FUNC_POINTERS: 00807 case MachO::S_MOD_TERM_FUNC_POINTERS: 00808 case MachO::S_INTERPOSING: 00809 return false; 00810 } 00811 } 00812 00813 /// \brief Generate the compact unwind encoding for the CFI instructions. 00814 uint32_t generateCompactUnwindEncoding( 00815 ArrayRef<MCCFIInstruction> Instrs) const override { 00816 return generateCompactUnwindEncodingImpl(Instrs); 00817 } 00818 }; 00819 00820 } // end anonymous namespace 00821 00822 MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, 00823 const MCRegisterInfo &MRI, 00824 StringRef TT, 00825 StringRef CPU) { 00826 Triple TheTriple(TT); 00827 00828 if (TheTriple.isOSBinFormatMachO()) 00829 return new DarwinX86_32AsmBackend(T, MRI, CPU); 00830 00831 if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF()) 00832 return new WindowsX86AsmBackend(T, false, CPU); 00833 00834 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); 00835 return new ELFX86_32AsmBackend(T, OSABI, CPU); 00836 } 00837 00838 MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, 00839 const MCRegisterInfo &MRI, 00840 StringRef TT, 00841 StringRef CPU) { 00842 Triple TheTriple(TT); 00843 00844 if (TheTriple.isOSBinFormatMachO()) { 00845 MachO::CPUSubTypeX86 CS = 00846 StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName()) 00847 .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H) 00848 .Default(MachO::CPU_SUBTYPE_X86_64_ALL); 00849 return new DarwinX86_64AsmBackend(T, MRI, CPU, CS); 00850 } 00851 00852 if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF()) 00853 return new WindowsX86AsmBackend(T, true, CPU); 00854 00855 uint8_t OSABI = 
MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); 00856 00857 if (TheTriple.getEnvironment() == Triple::GNUX32) 00858 return new ELFX86_X32AsmBackend(T, OSABI, CPU); 00859 return new ELFX86_64AsmBackend(T, OSABI, CPU); 00860 }