LLVM API Documentation
//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

public:
  AArch64AsmBackend(const Target &T) : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in AArch64FixupKinds.h.
      //
      // Name                              Offset (bits) Size (bits) Flags
      { "fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal },
      { "fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal },
      { "fixup_aarch64_add_imm12", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale1", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale2", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale4", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale8", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale16", 10, 12, 0 },
      { "fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal },
      { "fixup_aarch64_movw", 5, 16, 0 },
      { "fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal },
      { "fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal },
      { "fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal },
      { "fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal },
      { "fixup_aarch64_tlsdesc_call", 0, 0, 0 }
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;

  bool mayNeedRelaxation(const MCInst &Inst) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }
};

} // end anonymous namespace
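
// Editorial note (not part of the original source): in the fixup table above,
// "Offset" is the bit position of the field within the 32-bit instruction
// word and "Size" is its width. For example, the imm12 fixups describe bits
// [21:10], the unsigned-offset field of A64 load/store instructions, while
// the branch19 fixups describe bits [23:5]. applyFixup() below shifts the
// adjusted fixup value left by exactly this offset before merging it into
// the encoded instruction bytes.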

/// \brief The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case AArch64::fixup_aarch64_movw:
    return 2;

  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
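
// Editorial sketch (not part of the original file): AdrImmBits() scatters a
// 21-bit ADR/ADRP immediate into the A64 encoding, placing immlo in bits
// [30:29] and immhi in bits [23:5]. The hypothetical helper below exists
// purely to illustrate that layout and is unused by the backend.
static inline bool exampleAdrImmBitsLayout() {
  return AdrImmBits(0x000001) == 0x20000000u && // immlo -> bit 29
         AdrImmBits(0x000004) == 0x00000020u && // immhi bit 0 -> bit 5
         AdrImmBits(0x1fffff) == 0x60ffffe0u;   // all 21 immediate bits set
}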

static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Kind) {
  default:
    assert(false && "Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      report_fatal_error("fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      report_fatal_error("fixup value out of range");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      report_fatal_error("invalid imm12 fixup value");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value & 1 || Value >= 0x2000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value & 3 || Value >= 0x4000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value & 7 || Value >= 0x8000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value & 15 || Value >= 0x10000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw:
    report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet");
    return Value;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      report_fatal_error("fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      report_fatal_error("fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      report_fatal_error("fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      report_fatal_error("fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return Value;
  }
}

void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                   unsigned DataSize, uint64_t Value,
                                   bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup.getKind(), Value);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
}
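
// Editorial note (not part of the original file): adjustFixupValue() and
// applyFixup() together turn a resolved byte offset into an encoded field.
// For example, for fixup_aarch64_ldst_imm12_scale8 with a resolved offset of
// 0x40 bytes:
//
//   adjustFixupValue(...) == 0x40 >> 3 == 0x8   // scaled imm12 field
//   Value <<= 10;                               // Info.TargetOffset
//
// leaving 0x2000 to be ORed byte-by-byte (little-endian) into the 4-byte
// instruction word at Fixup.getOffset().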

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
                                         MCInst &Res) const {
  assert(false && "AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  if ((Count & 3) != 0) {
    for (uint64_t i = 0, e = (Count & 3); i != e; ++i)
      OW->Write8(0);
  }

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OW->Write32(0xd503201f);
  return true;
}

namespace {

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// \brief A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_AArch64_MODE_FRAMELESS = 0x02000000,

  /// \brief No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_AArch64_MODE_DWARF = 0x03000000,

  /// \brief This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_AArch64_MODE_FRAME = 0x04000000,

  /// \brief Frame register pair encodings.
  UNWIND_AArch64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_AArch64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_AArch64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_AArch64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_AArch64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_AArch64_FRAME_D8_D9_PAIR   = 0x00000100,
  UNWIND_AArch64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_AArch64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace
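
// Editorial sketch (not part of the original file): a worked example of the
// encodings above. A standard frame that additionally saves the x19/x20 and
// d8/d9 pairs would be described by the value below (0x04000101). The
// constant name is hypothetical and used only for exposition;
// generateCompactUnwindEncoding() below derives the same kind of value from
// a function's .cfi_* directives.
static const uint32_t ExampleFrameEncoding =
    CU::UNWIND_AArch64_MODE_FRAME | CU::UNWIND_AArch64_FRAME_X19_X20_PAIR |
    CU::UNWIND_AArch64_FRAME_D8_D9_PAIR;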

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_AArch64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T), MRI(MRI) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  bool doesSectionRequireSymbols(const MCSection &Section) const override {
    // Any section for which the linker breaks things into atoms needs to
    // preserve symbols, including assembler local symbols, to identify
    // those atoms. These sections are:
    // Sections of type:
    //
    //    S_CSTRING_LITERALS  (e.g. __cstring)
    //    S_LITERAL_POINTERS  (e.g. objc selector pointers)
    //    S_16BYTE_LITERALS, S_8BYTE_LITERALS, S_4BYTE_LITERALS
    //
    // Sections named:
    //
    //    __TEXT,__eh_frame
    //    __TEXT,__ustring
    //    __DATA,__cfstring
    //    __DATA,__objc_classrefs
    //    __DATA,__objc_catlist
    //
    // FIXME: It would be better if the compiler used actual linker local
    // symbols for each of these sections rather than preserving what
    // are ostensibly assembler local symbols.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO &>(Section);
    return (SMO.getType() == MachO::S_CSTRING_LITERALS ||
            SMO.getType() == MachO::S_4BYTE_LITERALS ||
            SMO.getType() == MachO::S_8BYTE_LITERALS ||
            SMO.getType() == MachO::S_16BYTE_LITERALS ||
            SMO.getType() == MachO::S_LITERAL_POINTERS ||
            (SMO.getSegmentName() == "__TEXT" &&
             (SMO.getSectionName() == "__eh_frame" ||
              SMO.getSectionName() == "__ustring")) ||
            (SMO.getSegmentName() == "__DATA" &&
             (SMO.getSectionName() == "__cfstring" ||
              SMO.getSectionName() == "__objc_classrefs" ||
              SMO.getSectionName() == "__objc_catlist")));
  }

  /// \brief Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_AArch64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_AArch64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
                   AArch64::FP &&
               "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_AArch64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_AArch64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_AArch64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_AArch64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsLittleEndian;

  ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian)
      : AArch64AsmBackend(T), OSABI(OSABI), IsLittleEndian(IsLittleEndian) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian);
  }

  void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override;

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;
};

void ELFAArch64AsmBackend::processFixupValue(
    const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup,
    const MCFragment *DF, const MCValue &Target, uint64_t &Value,
    bool &IsResolved) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    IsResolved = false;
}

void ELFAArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                      unsigned DataSize, uint64_t Value,
                                      bool IsPCRel) const {
  // Store fixups in the .eh_frame section in big-endian order.
  if (!IsLittleEndian && Fixup.getKind() == FK_Data_4) {
    const MCSection *Sec = Fixup.getValue()->FindAssociatedSection();
    const MCSectionELF *SecELF = static_cast<const MCSectionELF *>(Sec);
    if (SecELF->getSectionName() == ".eh_frame")
      Value = ByteSwap_32(unsigned(Value));
  }
  AArch64AsmBackend::applyFixup(Fixup, Data, DataSize, Value, IsPCRel);
}

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCRegisterInfo &MRI,
                                              StringRef TT, StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin())
    return new DarwinAArch64AsmBackend(T, MRI);

  assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFAArch64AsmBackend(T, OSABI, /*IsLittleEndian=*/true);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCRegisterInfo &MRI,
                                              StringRef TT, StringRef CPU) {
  Triple TheTriple(TT);

  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFAArch64AsmBackend(T, OSABI,
                                  /*IsLittleEndian=*/false);
}
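
// Editorial sketch (not part of the original file): these factory functions
// are typically hooked up to the target registry elsewhere (in LLVM of this
// vintage, MCTargetDesc/AArch64MCTargetDesc.cpp), roughly as follows. The
// target variable names are assumptions, shown for illustration only:
//
//   extern "C" void LLVMInitializeAArch64TargetMC() {
//     // ... other MC component registrations ...
//     TargetRegistry::RegisterMCAsmBackend(TheAArch64leTarget,
//                                          createAArch64leAsmBackend);
//     TargetRegistry::RegisterMCAsmBackend(TheAArch64beTarget,
//                                          createAArch64beAsmBackend);
//   }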