//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(tm.getSubtarget<SystemZSubtarget>()) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();
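  // (Informal note, not from the original source: GRX32 is the union of the
  // low-half GR32 and high-half GRH32 classes, so with the high-word
  // facility an i32 value may live in either half of a 64-bit GPR.)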
  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling; however,
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
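      // (Informal note: "Expand" below falls back to the generic
      // legalization, which builds FP_TO_UINT from FP_TO_SINT plus a
      // compare-and-adjust sequence around the signed conversion.)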
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
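      // (Informal note: FI here is LOAD FP INTEGER, i.e. FIEBR/FIDBR/FIXBR,
      // which rounds to an integral value; the plain form matches FRINT's
      // current-rounding-mode semantics.)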
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
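  // (Informal expansion of the wildcards: LZER/LZDR/LZXR load +0.0 and
  // LCEBR/LCDBR/LCXBR complement the sign to get -0.0; any other FP
  // immediate has to come from the constant pool.)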
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
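// (Usage sketch, illustrative rather than from this file: in C source these
// letters appear as inline asm constraints, e.g.
//   asm("aghi %0,%1" : "+d"(x) : "K"(42));
// where 'd' requests a GPR and 'K' a signed 16-bit immediate, and the
// memory letters pick which addressing modes the operand may use.)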
TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  MC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}
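// (Informal note: the check above only looks at the IR-level "tail" marker;
// whether the call can really be emitted as a sibling call is decided later,
// in LowerCall, once argument locations are known; see canUseSiblingCall.)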
// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}
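// (Worked example, informal: an i32 argument marked "signext" is assigned
// ValVT == i32, LocVT == i64 and LocInfo == SExt, so the outgoing side
// emits SIGN_EXTEND and the incoming side emits AssertSext followed by
// TRUNCATE.)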
SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL = static_cast<const SystemZFrameLowering *>(
    DAG.getSubtarget().getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
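      // (Informal ABI note: in the 160-byte register save area the argument
      // FPRs F0, F2, F4 and F6 have 8-byte slots at offsets 128, 136, 144
      // and 152, which is what getRegSpillOffset is expected to return for
      // them.)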
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
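      // (E.g. an f32 at stack offset 0 occupies bytes 4..7 of its 8-byte
      // slot, so the address below gains an extra 4-byte adjustment.)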
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI =
    getTargetMachine().getSubtargetImpl()->getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);
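  // (Informal shape of the DAG at this point:
  //    callseq_start -> argument stores -> glued CopyToReg chain ->
  //    SystemZISD::CALL -> callseq_end,
  //  with the register mask marking everything that is not call-preserved
  //  as clobbered.)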
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SystemZTargetLowering::
prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}
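// (Informal note, an assumption rather than something stated in this file:
// SERIALIZE is expected to assemble to a serializing branch-on-condition,
// "bcr 14,0" with the fast-serialization facility or "bcr 15,0" without it,
// providing the barrier that volatile and atomic loads need.)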
// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can be
  // done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
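  // (Worked example, informal: for CCMask == CCMASK_1 the case below yields
  // ((IPM ^ (1 << 28)) - (1 << 28)) >> 31, with CC in bits 29..28 and the
  // two bits above them zero.  CC == 1 gives 0x10000000 ^ 0x10000000 = 0,
  // and 0 - 0x10000000 has bit 31 set, so the shift produces 1; CC == 0,
  // 2 and 3 all leave bit 31 clear and produce 0.)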
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    assert(C.ICmpType == SystemZICMP::Any &&
           "Signedness shouldn't matter here.");
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                           Load->getChain(), Load->getBasePtr(),
                           Load->getPointerInfo(), Load->getMemoryVT(),
                           Load->isVolatile(), Load->isNonTemporal(),
                           Load->isInvariant(), Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here.  Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
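    // (E.g. CHHSI/CHSI/CGHSI and their logical counterparts
    // CLHHSI/CLFHSI/CLGHSI compare storage directly against a 16-bit
    // immediate.)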
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, thus avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
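      // (Informal note: LTGFR sign-extends a 32-bit register into 64 bits
      // and sets CC from the result, so reusing the extension makes both
      // the shift and the separate compare unnecessary.)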
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits()
        <= C.Op0.getValueType().getSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
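  // (Worked example, informal: for Mask == 0xff00, Low == 0x0100 and
  // High == 0x8000.  An unsigned "(X & 0xff00) < 0x0100" means all masked
  // bits are zero (CCMASK_TM_ALL_0), while ">= 0x0100" means at least one
  // is set (CCMASK_TM_SOME_1), matching the tests below.)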
01417 if (CmpVal == 0) { 01418 if (CCMask == SystemZ::CCMASK_CMP_EQ) 01419 return SystemZ::CCMASK_TM_ALL_0; 01420 if (CCMask == SystemZ::CCMASK_CMP_NE) 01421 return SystemZ::CCMASK_TM_SOME_1; 01422 } 01423 if (EffectivelyUnsigned && CmpVal <= Low) { 01424 if (CCMask == SystemZ::CCMASK_CMP_LT) 01425 return SystemZ::CCMASK_TM_ALL_0; 01426 if (CCMask == SystemZ::CCMASK_CMP_GE) 01427 return SystemZ::CCMASK_TM_SOME_1; 01428 } 01429 if (EffectivelyUnsigned && CmpVal < Low) { 01430 if (CCMask == SystemZ::CCMASK_CMP_LE) 01431 return SystemZ::CCMASK_TM_ALL_0; 01432 if (CCMask == SystemZ::CCMASK_CMP_GT) 01433 return SystemZ::CCMASK_TM_SOME_1; 01434 } 01435 01436 // Check for equality comparisons with the mask, or the equivalent. 01437 if (CmpVal == Mask) { 01438 if (CCMask == SystemZ::CCMASK_CMP_EQ) 01439 return SystemZ::CCMASK_TM_ALL_1; 01440 if (CCMask == SystemZ::CCMASK_CMP_NE) 01441 return SystemZ::CCMASK_TM_SOME_0; 01442 } 01443 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) { 01444 if (CCMask == SystemZ::CCMASK_CMP_GT) 01445 return SystemZ::CCMASK_TM_ALL_1; 01446 if (CCMask == SystemZ::CCMASK_CMP_LE) 01447 return SystemZ::CCMASK_TM_SOME_0; 01448 } 01449 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) { 01450 if (CCMask == SystemZ::CCMASK_CMP_GE) 01451 return SystemZ::CCMASK_TM_ALL_1; 01452 if (CCMask == SystemZ::CCMASK_CMP_LT) 01453 return SystemZ::CCMASK_TM_SOME_0; 01454 } 01455 01456 // Check for ordered comparisons with the top bit. 01457 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) { 01458 if (CCMask == SystemZ::CCMASK_CMP_LE) 01459 return SystemZ::CCMASK_TM_MSB_0; 01460 if (CCMask == SystemZ::CCMASK_CMP_GT) 01461 return SystemZ::CCMASK_TM_MSB_1; 01462 } 01463 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) { 01464 if (CCMask == SystemZ::CCMASK_CMP_LT) 01465 return SystemZ::CCMASK_TM_MSB_0; 01466 if (CCMask == SystemZ::CCMASK_CMP_GE) 01467 return SystemZ::CCMASK_TM_MSB_1; 01468 } 01469 01470 // If there are just two bits, we can do equality checks for Low and High 01471 // as well. 01472 if (Mask == Low + High) { 01473 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low) 01474 return SystemZ::CCMASK_TM_MIXED_MSB_0; 01475 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low) 01476 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; 01477 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High) 01478 return SystemZ::CCMASK_TM_MIXED_MSB_1; 01479 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High) 01480 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; 01481 } 01482 01483 // Looks like we've exhausted our options. 01484 return 0; 01485 } 01486 01487 // See whether C can be implemented as a TEST UNDER MASK instruction. 01488 // Update the arguments with the TM version if so. 01489 static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) { 01490 // Check that we have a comparison with a constant. 01491 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 01492 if (!ConstOp1) 01493 return; 01494 uint64_t CmpVal = ConstOp1->getZExtValue(); 01495 01496 // Check whether the nonconstant input is an AND with a constant mask. 
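// E.g. (a sketch) for an i32 test "(X & 0xff000000) == 0", Op0 is the
// AND node, so NewC compares X itself with MaskVal = 0xff000000;
// getTestUnderMaskCond below can then map this onto a TMLH-style
// TM_ALL_0 test.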
01497 Comparison NewC(C); 01498 uint64_t MaskVal; 01499 ConstantSDNode *Mask = nullptr; 01500 if (C.Op0.getOpcode() == ISD::AND) { 01501 NewC.Op0 = C.Op0.getOperand(0); 01502 NewC.Op1 = C.Op0.getOperand(1); 01503 Mask = dyn_cast<ConstantSDNode>(NewC.Op1); 01504 if (!Mask) 01505 return; 01506 MaskVal = Mask->getZExtValue(); 01507 } else { 01508 // There is no instruction to compare with a 64-bit immediate 01509 // so use TMHH instead if possible. We need an unsigned ordered 01510 // comparison with an i64 immediate. 01511 if (NewC.Op0.getValueType() != MVT::i64 || 01512 NewC.CCMask == SystemZ::CCMASK_CMP_EQ || 01513 NewC.CCMask == SystemZ::CCMASK_CMP_NE || 01514 NewC.ICmpType == SystemZICMP::SignedOnly) 01515 return; 01516 // Convert LE and GT comparisons into LT and GE. 01517 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE || 01518 NewC.CCMask == SystemZ::CCMASK_CMP_GT) { 01519 if (CmpVal == uint64_t(-1)) 01520 return; 01521 CmpVal += 1; 01522 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ; 01523 } 01524 // If the low N bits of Op1 are zero then the low N bits of Op0 can 01525 // be masked off without changing the result. 01526 MaskVal = -(CmpVal & -CmpVal); 01527 NewC.ICmpType = SystemZICMP::UnsignedOnly; 01528 } 01529 01530 // Check whether the combination of mask, comparison value and comparison 01531 // type is suitable. 01532 unsigned BitSize = NewC.Op0.getValueType().getSizeInBits(); 01533 unsigned NewCCMask, ShiftVal; 01534 if (NewC.ICmpType != SystemZICMP::SignedOnly && 01535 NewC.Op0.getOpcode() == ISD::SHL && 01536 isSimpleShift(NewC.Op0, ShiftVal) && 01537 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 01538 MaskVal >> ShiftVal, 01539 CmpVal >> ShiftVal, 01540 SystemZICMP::Any))) { 01541 NewC.Op0 = NewC.Op0.getOperand(0); 01542 MaskVal >>= ShiftVal; 01543 } else if (NewC.ICmpType != SystemZICMP::SignedOnly && 01544 NewC.Op0.getOpcode() == ISD::SRL && 01545 isSimpleShift(NewC.Op0, ShiftVal) && 01546 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 01547 MaskVal << ShiftVal, 01548 CmpVal << ShiftVal, 01549 SystemZICMP::UnsignedOnly))) { 01550 NewC.Op0 = NewC.Op0.getOperand(0); 01551 MaskVal <<= ShiftVal; 01552 } else { 01553 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, 01554 NewC.ICmpType); 01555 if (!NewCCMask) 01556 return; 01557 } 01558 01559 // Go ahead and make the change. 01560 C.Opcode = SystemZISD::TM; 01561 C.Op0 = NewC.Op0; 01562 if (Mask && Mask->getZExtValue() == MaskVal) 01563 C.Op1 = SDValue(Mask, 0); 01564 else 01565 C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType()); 01566 C.CCValid = SystemZ::CCMASK_TM; 01567 C.CCMask = NewCCMask; 01568 } 01569 01570 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1. 01571 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, 01572 ISD::CondCode Cond) { 01573 Comparison C(CmpOp0, CmpOp1); 01574 C.CCMask = CCMaskForCondCode(Cond); 01575 if (C.Op0.getValueType().isFloatingPoint()) { 01576 C.CCValid = SystemZ::CCMASK_FCMP; 01577 C.Opcode = SystemZISD::FCMP; 01578 adjustForFNeg(C); 01579 } else { 01580 C.CCValid = SystemZ::CCMASK_ICMP; 01581 C.Opcode = SystemZISD::ICMP; 01582 // Choose the type of comparison. Equality and inequality tests can 01583 // use either signed or unsigned comparisons. The choice also doesn't 01584 // matter if both sign bits are known to be clear. In those cases we 01585 // want to give the main isel code the freedom to choose whichever 01586 // form fits best.
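// For example (a sketch): "(zext i8 %a) != (zext i8 %b)" is an
// inequality test and both sign bits are known clear, so either rule
// below classifies it as SystemZICMP::Any and isel may pick a signed
// (CR) or logical (CLR) compare freely.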
01587 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 01588 C.CCMask == SystemZ::CCMASK_CMP_NE || 01589 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 01590 C.ICmpType = SystemZICMP::Any; 01591 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 01592 C.ICmpType = SystemZICMP::UnsignedOnly; 01593 else 01594 C.ICmpType = SystemZICMP::SignedOnly; 01595 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 01596 adjustZeroCmp(DAG, C); 01597 adjustSubwordCmp(DAG, C); 01598 adjustForSubtraction(DAG, C); 01599 adjustForLTGFR(C); 01600 adjustICmpTruncate(DAG, C); 01601 } 01602 01603 if (shouldSwapCmpOperands(C)) { 01604 std::swap(C.Op0, C.Op1); 01605 C.CCMask = reverseCCMask(C.CCMask); 01606 } 01607 01608 adjustForTestUnderMask(DAG, C); 01609 return C; 01610 } 01611 01612 // Emit the comparison instruction described by C. 01613 static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) { 01614 if (C.Opcode == SystemZISD::ICMP) 01615 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 01616 DAG.getConstant(C.ICmpType, MVT::i32)); 01617 if (C.Opcode == SystemZISD::TM) { 01618 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 01619 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 01620 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 01621 DAG.getConstant(RegisterOnly, MVT::i32)); 01622 } 01623 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 01624 } 01625 01626 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 01627 // 64 bits. Extend is the extension type to use. Store the high part 01628 // in Hi and the low part in Lo. 01629 static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL, 01630 unsigned Extend, SDValue Op0, SDValue Op1, 01631 SDValue &Hi, SDValue &Lo) { 01632 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 01633 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 01634 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 01635 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64)); 01636 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 01637 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 01638 } 01639 01640 // Lower a binary operation that produces two VT results, one in each 01641 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 01642 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation 01643 // on the extended Op0 and (unextended) Op1. Store the even register result 01644 // in Even and the odd register result in Odd. 01645 static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, 01646 unsigned Extend, unsigned Opcode, 01647 SDValue Op0, SDValue Op1, 01648 SDValue &Even, SDValue &Odd) { 01649 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0); 01650 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, 01651 SDValue(In128, 0), Op1); 01652 bool Is32Bit = is32Bit(VT); 01653 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 01654 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 01655 } 01656 01657 // Return an i32 value that is 1 if the CC value produced by Glue is 01658 // in the mask CCMask and 0 otherwise. CC is known to have a value 01659 // in CCValid, so other values can be ignored. 
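// The IPM result holds CC in bits 28 and 29. As one concrete instance
// (a sketch): if the condition is true exactly for CC values 2 and 3,
// getIPMConversion can return XORValue = 0, AddValue = 0 and Bit = 29,
// since bit 29 is the high bit of CC and (Result >> 29) & 1 is then
// the required 0/1 value.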
01660 static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue, 01661 unsigned CCValid, unsigned CCMask) { 01662 IPMConversion Conversion = getIPMConversion(CCValid, CCMask); 01663 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); 01664 01665 if (Conversion.XORValue) 01666 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, 01667 DAG.getConstant(Conversion.XORValue, MVT::i32)); 01668 01669 if (Conversion.AddValue) 01670 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, 01671 DAG.getConstant(Conversion.AddValue, MVT::i32)); 01672 01673 // The SHR/AND sequence should get optimized to an RISBG. 01674 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, 01675 DAG.getConstant(Conversion.Bit, MVT::i32)); 01676 if (Conversion.Bit != 31) 01677 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, 01678 DAG.getConstant(1, MVT::i32)); 01679 return Result; 01680 } 01681 01682 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 01683 SelectionDAG &DAG) const { 01684 SDValue CmpOp0 = Op.getOperand(0); 01685 SDValue CmpOp1 = Op.getOperand(1); 01686 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 01687 SDLoc DL(Op); 01688 01689 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 01690 SDValue Glue = emitCmp(DAG, DL, C); 01691 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 01692 } 01693 01694 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 01695 SDValue Chain = Op.getOperand(0); 01696 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 01697 SDValue CmpOp0 = Op.getOperand(2); 01698 SDValue CmpOp1 = Op.getOperand(3); 01699 SDValue Dest = Op.getOperand(4); 01700 SDLoc DL(Op); 01701 01702 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 01703 SDValue Glue = emitCmp(DAG, DL, C); 01704 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 01705 Chain, DAG.getConstant(C.CCValid, MVT::i32), 01706 DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue); 01707 } 01708 01709 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 01710 // allowing Pos and Neg to be wider than CmpOp. 01711 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 01712 return (Neg.getOpcode() == ISD::SUB && 01713 Neg.getOperand(0).getOpcode() == ISD::Constant && 01714 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 01715 Neg.getOperand(1) == Pos && 01716 (Pos == CmpOp || 01717 (Pos.getOpcode() == ISD::SIGN_EXTEND && 01718 Pos.getOperand(0) == CmpOp))); 01719 } 01720 01721 // Return the absolute or negative absolute of Op; IsNegative decides which. 01722 static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op, 01723 bool IsNegative) { 01724 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 01725 if (IsNegative) 01726 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 01727 DAG.getConstant(0, Op.getValueType()), Op); 01728 return Op; 01729 } 01730 01731 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 01732 SelectionDAG &DAG) const { 01733 SDValue CmpOp0 = Op.getOperand(0); 01734 SDValue CmpOp1 = Op.getOperand(1); 01735 SDValue TrueOp = Op.getOperand(2); 01736 SDValue FalseOp = Op.getOperand(3); 01737 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 01738 SDLoc DL(Op); 01739 01740 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 01741 01742 // Check for absolute and negative-absolute selections, including those 01743 // where the comparison value is sign-extended (for LPGFR and LNGFR). 01744 // This check supplements the one in DAGCombiner. 
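// E.g. (a sketch): (select (setlt X, 0), (sub 0, X), X) computes |X|;
// here FalseOp == X and TrueOp == 0 - X, so the second isAbsolute call
// matches and the select lowers to a single IABS (LPGFR-style) node
// instead of a compare-and-branch sequence.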
01745 if (C.Opcode == SystemZISD::ICMP && 01746 C.CCMask != SystemZ::CCMASK_CMP_EQ && 01747 C.CCMask != SystemZ::CCMASK_CMP_NE && 01748 C.Op1.getOpcode() == ISD::Constant && 01749 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 01750 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 01751 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 01752 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 01753 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 01754 } 01755 01756 SDValue Glue = emitCmp(DAG, DL, C); 01757 01758 // Special case for handling -1/0 results. The shifts we use here 01759 // should get optimized with the IPM conversion sequence. 01760 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp); 01761 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp); 01762 if (TrueC && FalseC) { 01763 int64_t TrueVal = TrueC->getSExtValue(); 01764 int64_t FalseVal = FalseC->getSExtValue(); 01765 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { 01766 // Invert the condition if we want -1 on false. 01767 if (TrueVal == 0) 01768 C.CCMask ^= C.CCValid; 01769 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 01770 EVT VT = Op.getValueType(); 01771 // Extend the result to VT. Upper bits are ignored. 01772 if (!is32Bit(VT)) 01773 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); 01774 // Sign-extend from the low bit. 01775 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32); 01776 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); 01777 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); 01778 } 01779 } 01780 01781 SmallVector<SDValue, 5> Ops; 01782 Ops.push_back(TrueOp); 01783 Ops.push_back(FalseOp); 01784 Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32)); 01785 Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32)); 01786 Ops.push_back(Glue); 01787 01788 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 01789 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops); 01790 } 01791 01792 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 01793 SelectionDAG &DAG) const { 01794 SDLoc DL(Node); 01795 const GlobalValue *GV = Node->getGlobal(); 01796 int64_t Offset = Node->getOffset(); 01797 EVT PtrVT = getPointerTy(); 01798 Reloc::Model RM = DAG.getTarget().getRelocationModel(); 01799 CodeModel::Model CM = DAG.getTarget().getCodeModel(); 01800 01801 SDValue Result; 01802 if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) { 01803 // Assign anchors at 1<<12 byte boundaries. 01804 uint64_t Anchor = Offset & ~uint64_t(0xfff); 01805 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 01806 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 01807 01808 // The offset can be folded into the address if it is aligned to a halfword. 01809 Offset -= Anchor; 01810 if (Offset != 0 && (Offset & 1) == 0) { 01811 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 01812 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 01813 Offset = 0; 01814 } 01815 } else { 01816 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 01817 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 01818 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 01819 MachinePointerInfo::getGOT(), false, false, false, 0); 01820 } 01821 01822 // If there was a non-zero offset that we didn't fold, create an explicit 01823 // addition for it. 
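// E.g. (a sketch) GV + 0x1001: the anchor GV + 0x1000 is materialized
// PC-relatively above, but the remaining offset of 1 is odd (not
// halfword aligned) and could not be folded, so it is added here.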
01824 if (Offset != 0) 01825 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 01826 DAG.getConstant(Offset, PtrVT)); 01827 01828 return Result; 01829 } 01830 01831 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 01832 SelectionDAG &DAG) const { 01833 SDLoc DL(Node); 01834 const GlobalValue *GV = Node->getGlobal(); 01835 EVT PtrVT = getPointerTy(); 01836 TLSModel::Model model = DAG.getTarget().getTLSModel(GV); 01837 01838 if (model != TLSModel::LocalExec) 01839 llvm_unreachable("only local-exec TLS mode supported"); 01840 01841 // The high part of the thread pointer is in access register 0. 01842 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 01843 DAG.getConstant(0, MVT::i32)); 01844 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 01845 01846 // The low part of the thread pointer is in access register 1. 01847 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 01848 DAG.getConstant(1, MVT::i32)); 01849 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 01850 01851 // Merge them into a single 64-bit address. 01852 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 01853 DAG.getConstant(32, PtrVT)); 01854 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 01855 01856 // Get the offset of GA from the thread pointer. 01857 SystemZConstantPoolValue *CPV = 01858 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 01859 01860 // Force the offset into the constant pool and load it from there. 01861 SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8); 01862 SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 01863 CPAddr, MachinePointerInfo::getConstantPool(), 01864 false, false, false, 0); 01865 01866 // Add the base and offset together. 01867 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 01868 } 01869 01870 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 01871 SelectionDAG &DAG) const { 01872 SDLoc DL(Node); 01873 const BlockAddress *BA = Node->getBlockAddress(); 01874 int64_t Offset = Node->getOffset(); 01875 EVT PtrVT = getPointerTy(); 01876 01877 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 01878 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 01879 return Result; 01880 } 01881 01882 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 01883 SelectionDAG &DAG) const { 01884 SDLoc DL(JT); 01885 EVT PtrVT = getPointerTy(); 01886 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 01887 01888 // Use LARL to load the address of the table. 01889 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 01890 } 01891 01892 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 01893 SelectionDAG &DAG) const { 01894 SDLoc DL(CP); 01895 EVT PtrVT = getPointerTy(); 01896 01897 SDValue Result; 01898 if (CP->isMachineConstantPoolEntry()) 01899 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 01900 CP->getAlignment()); 01901 else 01902 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 01903 CP->getAlignment(), CP->getOffset()); 01904 01905 // Use LARL to load the address of the constant pool entry. 
01906 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 01907 } 01908 01909 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 01910 SelectionDAG &DAG) const { 01911 SDLoc DL(Op); 01912 SDValue In = Op.getOperand(0); 01913 EVT InVT = In.getValueType(); 01914 EVT ResVT = Op.getValueType(); 01915 01916 if (InVT == MVT::i32 && ResVT == MVT::f32) { 01917 SDValue In64; 01918 if (Subtarget.hasHighWord()) { 01919 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 01920 MVT::i64); 01921 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 01922 MVT::i64, SDValue(U64, 0), In); 01923 } else { 01924 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 01925 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 01926 DAG.getConstant(32, MVT::i64)); 01927 } 01928 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 01929 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 01930 DL, MVT::f32, Out64); 01931 } 01932 if (InVT == MVT::f32 && ResVT == MVT::i32) { 01933 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 01934 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 01935 MVT::f64, SDValue(U64, 0), In); 01936 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 01937 if (Subtarget.hasHighWord()) 01938 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 01939 MVT::i32, Out64); 01940 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 01941 DAG.getConstant(32, MVT::i64)); 01942 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 01943 } 01944 llvm_unreachable("Unexpected bitcast combination"); 01945 } 01946 01947 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 01948 SelectionDAG &DAG) const { 01949 MachineFunction &MF = DAG.getMachineFunction(); 01950 SystemZMachineFunctionInfo *FuncInfo = 01951 MF.getInfo<SystemZMachineFunctionInfo>(); 01952 EVT PtrVT = getPointerTy(); 01953 01954 SDValue Chain = Op.getOperand(0); 01955 SDValue Addr = Op.getOperand(1); 01956 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 01957 SDLoc DL(Op); 01958 01959 // The initial values of each field. 01960 const unsigned NumFields = 4; 01961 SDValue Fields[NumFields] = { 01962 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 01963 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 01964 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 01965 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 01966 }; 01967 01968 // Store each field into its respective slot. 
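// This matches the SystemZ ABI va_list layout (a sketch, assuming the
// usual s390x definition):
//
//   struct __va_list {
//     long __gpr;                // index of next GPR argument
//     long __fpr;                // index of next FPR argument
//     void *__overflow_arg_area; // stack argument area
//     void *__reg_save_area;     // register save area
//   };
//
// hence the four 8-byte slots at offsets 0, 8, 16 and 24 below.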
01969 SDValue MemOps[NumFields]; 01970 unsigned Offset = 0; 01971 for (unsigned I = 0; I < NumFields; ++I) { 01972 SDValue FieldAddr = Addr; 01973 if (Offset != 0) 01974 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 01975 DAG.getIntPtrConstant(Offset)); 01976 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 01977 MachinePointerInfo(SV, Offset), 01978 false, false, 0); 01979 Offset += 8; 01980 } 01981 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); 01982 } 01983 01984 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 01985 SelectionDAG &DAG) const { 01986 SDValue Chain = Op.getOperand(0); 01987 SDValue DstPtr = Op.getOperand(1); 01988 SDValue SrcPtr = Op.getOperand(2); 01989 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 01990 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 01991 SDLoc DL(Op); 01992 01993 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 01994 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 01995 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 01996 } 01997 01998 SDValue SystemZTargetLowering:: 01999 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 02000 SDValue Chain = Op.getOperand(0); 02001 SDValue Size = Op.getOperand(1); 02002 SDLoc DL(Op); 02003 02004 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 02005 02006 // Get a reference to the stack pointer. 02007 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 02008 02009 // Get the new stack pointer value. 02010 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 02011 02012 // Copy the new stack pointer back. 02013 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 02014 02015 // The allocated data lives above the 160 bytes allocated for the standard 02016 // frame, plus any outgoing stack arguments. We don't know how much that 02017 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 02018 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 02019 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 02020 02021 SDValue Ops[2] = { Result, Chain }; 02022 return DAG.getMergeValues(Ops, DL); 02023 } 02024 02025 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 02026 SelectionDAG &DAG) const { 02027 EVT VT = Op.getValueType(); 02028 SDLoc DL(Op); 02029 SDValue Ops[2]; 02030 if (is32Bit(VT)) 02031 // Just do a normal 64-bit multiplication and extract the results. 02032 // We define this so that it can be used for constant division. 02033 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 02034 Op.getOperand(1), Ops[1], Ops[0]); 02035 else { 02036 // Do a full 128-bit multiplication based on UMUL_LOHI64: 02037 // 02038 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 02039 // 02040 // but using the fact that the upper halves are either all zeros 02041 // or all ones: 02042 // 02043 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 02044 // 02045 // and grouping the right terms together since they are quicker than the 02046 // multiplication: 02047 // 02048 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 02049 SDValue C63 = DAG.getConstant(63, MVT::i64); 02050 SDValue LL = Op.getOperand(0); 02051 SDValue RL = Op.getOperand(1); 02052 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 02053 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 02054 // UMUL_LOHI64 returns the low result in the odd register and the high 02055 // result in the even register. 
SMUL_LOHI is defined to return the 02056 // low half first, so the results are in reverse order. 02057 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 02058 LL, RL, Ops[1], Ops[0]); 02059 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 02060 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 02061 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 02062 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 02063 } 02064 return DAG.getMergeValues(Ops, DL); 02065 } 02066 02067 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 02068 SelectionDAG &DAG) const { 02069 EVT VT = Op.getValueType(); 02070 SDLoc DL(Op); 02071 SDValue Ops[2]; 02072 if (is32Bit(VT)) 02073 // Just do a normal 64-bit multiplication and extract the results. 02074 // We define this so that it can be used for constant division. 02075 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 02076 Op.getOperand(1), Ops[1], Ops[0]); 02077 else 02078 // UMUL_LOHI64 returns the low result in the odd register and the high 02079 // result in the even register. UMUL_LOHI is defined to return the 02080 // low half first, so the results are in reverse order. 02081 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 02082 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 02083 return DAG.getMergeValues(Ops, DL); 02084 } 02085 02086 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 02087 SelectionDAG &DAG) const { 02088 SDValue Op0 = Op.getOperand(0); 02089 SDValue Op1 = Op.getOperand(1); 02090 EVT VT = Op.getValueType(); 02091 SDLoc DL(Op); 02092 unsigned Opcode; 02093 02094 // We use DSGF for 32-bit division. 02095 if (is32Bit(VT)) { 02096 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 02097 Opcode = SystemZISD::SDIVREM32; 02098 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 02099 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 02100 Opcode = SystemZISD::SDIVREM32; 02101 } else 02102 Opcode = SystemZISD::SDIVREM64; 02103 02104 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 02105 // input is "don't care". The instruction returns the remainder in 02106 // the even register and the quotient in the odd register. 02107 SDValue Ops[2]; 02108 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 02109 Op0, Op1, Ops[1], Ops[0]); 02110 return DAG.getMergeValues(Ops, DL); 02111 } 02112 02113 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 02114 SelectionDAG &DAG) const { 02115 EVT VT = Op.getValueType(); 02116 SDLoc DL(Op); 02117 02118 // DL(G) uses a double-width dividend, so we need to clear the even 02119 // register in the GR128 input. The instruction returns the remainder 02120 // in the even register and the quotient in the odd register. 02121 SDValue Ops[2]; 02122 if (is32Bit(VT)) 02123 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 02124 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 02125 else 02126 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 02127 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 02128 return DAG.getMergeValues(Ops, DL); 02129 } 02130 02131 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 02132 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 02133 02134 // Get the known-zero masks for each operand. 
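// Typical input (a sketch): (or (shl %hi, 32), (zext i32 %lo to i64)).
// computeKnownBits proves the low 32 bits of the shift and the high
// 32 bits of the zero-extension are zero, so the OR can be rewritten
// below as an insertion of %lo into the low word of the shifted
// operand.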
02135 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 02136 APInt KnownZero[2], KnownOne[2]; 02137 DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]); 02138 DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]); 02139 02140 // See if the upper 32 bits of one operand and the lower 32 bits of the 02141 // other are known zero. They are the low and high operands respectively. 02142 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 02143 KnownZero[1].getZExtValue() }; 02144 unsigned High, Low; 02145 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 02146 High = 1, Low = 0; 02147 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 02148 High = 0, Low = 1; 02149 else 02150 return Op; 02151 02152 SDValue LowOp = Ops[Low]; 02153 SDValue HighOp = Ops[High]; 02154 02155 // If the high part is a constant, we're better off using IILH. 02156 if (HighOp.getOpcode() == ISD::Constant) 02157 return Op; 02158 02159 // If the low part is a constant that is outside the range of LHI, 02160 // then we're better off using IILF. 02161 if (LowOp.getOpcode() == ISD::Constant) { 02162 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 02163 if (!isInt<16>(Value)) 02164 return Op; 02165 } 02166 02167 // Check whether the high part is an AND that doesn't change the 02168 // high 32 bits and just masks out low bits. We can skip it if so. 02169 if (HighOp.getOpcode() == ISD::AND && 02170 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 02171 SDValue HighOp0 = HighOp.getOperand(0); 02172 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 02173 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 02174 HighOp = HighOp0; 02175 } 02176 02177 // Take advantage of the fact that all GR32 operations only change the 02178 // low 32 bits by truncating Low to an i32 and inserting it directly 02179 // using a subreg. The interesting cases are those where the truncation 02180 // can be folded. 02181 SDLoc DL(Op); 02182 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 02183 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 02184 MVT::i64, HighOp, Low32); 02185 } 02186 02187 // Op is an atomic load. Lower it into a normal volatile load. 02188 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 02189 SelectionDAG &DAG) const { 02190 auto *Node = cast<AtomicSDNode>(Op.getNode()); 02191 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 02192 Node->getChain(), Node->getBasePtr(), 02193 Node->getMemoryVT(), Node->getMemOperand()); 02194 } 02195 02196 // Op is an atomic store. Lower it into a normal volatile store followed 02197 // by a serialization. 02198 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 02199 SelectionDAG &DAG) const { 02200 auto *Node = cast<AtomicSDNode>(Op.getNode()); 02201 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 02202 Node->getBasePtr(), Node->getMemoryVT(), 02203 Node->getMemOperand()); 02204 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other, 02205 Chain), 0); 02206 } 02207 02208 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 02209 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 02210 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 02211 SelectionDAG &DAG, 02212 unsigned Opcode) const { 02213 auto *Node = cast<AtomicSDNode>(Op.getNode()); 02214 02215 // 32-bit operations need no code outside the main loop. 
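// Partword sketch: for an i16 field at byte offset 2 within its
// aligned word, AlignedAddr = Addr & -4 and BitShift = 8 * 2 = 16;
// rotating the loaded word left by 16 brings the halfword to the top
// of the GR32, the operation is applied there, and NegBitShift rotates
// the result back before the CS.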
02216 EVT NarrowVT = Node->getMemoryVT(); 02217 EVT WideVT = MVT::i32; 02218 if (NarrowVT == WideVT) 02219 return Op; 02220 02221 int64_t BitSize = NarrowVT.getSizeInBits(); 02222 SDValue ChainIn = Node->getChain(); 02223 SDValue Addr = Node->getBasePtr(); 02224 SDValue Src2 = Node->getVal(); 02225 MachineMemOperand *MMO = Node->getMemOperand(); 02226 SDLoc DL(Node); 02227 EVT PtrVT = Addr.getValueType(); 02228 02229 // Convert atomic subtracts of constants into additions. 02230 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 02231 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { 02232 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 02233 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 02234 } 02235 02236 // Get the address of the containing word. 02237 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 02238 DAG.getConstant(-4, PtrVT)); 02239 02240 // Get the number of bits that the word must be rotated left in order 02241 // to bring the field to the top bits of a GR32. 02242 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 02243 DAG.getConstant(3, PtrVT)); 02244 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 02245 02246 // Get the complementing shift amount, for rotating a field in the top 02247 // bits back to its proper position. 02248 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 02249 DAG.getConstant(0, WideVT), BitShift); 02250 02251 // Extend the source operand to 32 bits and prepare it for the inner loop. 02252 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 02253 // operations require the source to be shifted in advance. (This shift 02254 // can be folded if the source is constant.) For AND and NAND, the lower 02255 // bits must be set, while for other opcodes they should be left clear. 02256 if (Opcode != SystemZISD::ATOMIC_SWAPW) 02257 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 02258 DAG.getConstant(32 - BitSize, WideVT)); 02259 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 02260 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 02261 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 02262 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 02263 02264 // Construct the ATOMIC_LOADW_* node. 02265 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 02266 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 02267 DAG.getConstant(BitSize, WideVT) }; 02268 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 02269 NarrowVT, MMO); 02270 02271 // Rotate the result of the final CS so that the field is in the lower 02272 // bits of a GR32, then truncate it. 02273 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 02274 DAG.getConstant(BitSize, WideVT)); 02275 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 02276 02277 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 02278 return DAG.getMergeValues(RetOps, DL); 02279 } 02280 02281 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations 02282 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit 02283 // operations into additions. 02284 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, 02285 SelectionDAG &DAG) const { 02286 auto *Node = cast<AtomicSDNode>(Op.getNode()); 02287 EVT MemVT = Node->getMemoryVT(); 02288 if (MemVT == MVT::i32 || MemVT == MVT::i64) { 02289 // A full-width operation. 
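// (A sketch) e.g. an i64 "atomicrmw sub" of 7 becomes an atomic add of
// -7 below: there is no load-and-subtract instruction, but the negated
// constant can use LAA(G) or an A(G)FHI-style addition.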
02290 assert(Op.getValueType() == MemVT && "Mismatched VTs"); 02291 SDValue Src2 = Node->getVal(); 02292 SDValue NegSrc2; 02293 SDLoc DL(Src2); 02294 02295 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { 02296 // Use an addition if the operand is constant and either LAA(G) is 02297 // available or the negative value is in the range of A(G)FHI. 02298 int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); 02299 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1()) 02300 NegSrc2 = DAG.getConstant(Value, MemVT); 02301 } else if (Subtarget.hasInterlockedAccess1()) 02302 // Use LAA(G) if available. 02303 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT), 02304 Src2); 02305 02306 if (NegSrc2.getNode()) 02307 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, 02308 Node->getChain(), Node->getBasePtr(), NegSrc2, 02309 Node->getMemOperand(), Node->getOrdering(), 02310 Node->getSynchScope()); 02311 02312 // Use the node as-is. 02313 return Op; 02314 } 02315 02316 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 02317 } 02318 02319 // Node is an 8-, 16- or 32-bit ATOMIC_CMP_SWAP operation. Lower the first 02320 // two into a fullword ATOMIC_CMP_SWAPW operation. 02321 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 02322 SelectionDAG &DAG) const { 02323 auto *Node = cast<AtomicSDNode>(Op.getNode()); 02324 02325 // We have native support for 32-bit compare and swap. 02326 EVT NarrowVT = Node->getMemoryVT(); 02327 EVT WideVT = MVT::i32; 02328 if (NarrowVT == WideVT) 02329 return Op; 02330 02331 int64_t BitSize = NarrowVT.getSizeInBits(); 02332 SDValue ChainIn = Node->getOperand(0); 02333 SDValue Addr = Node->getOperand(1); 02334 SDValue CmpVal = Node->getOperand(2); 02335 SDValue SwapVal = Node->getOperand(3); 02336 MachineMemOperand *MMO = Node->getMemOperand(); 02337 SDLoc DL(Node); 02338 EVT PtrVT = Addr.getValueType(); 02339 02340 // Get the address of the containing word. 02341 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 02342 DAG.getConstant(-4, PtrVT)); 02343 02344 // Get the number of bits that the word must be rotated left in order 02345 // to bring the field to the top bits of a GR32. 02346 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 02347 DAG.getConstant(3, PtrVT)); 02348 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 02349 02350 // Get the complementing shift amount, for rotating a field in the top 02351 // bits back to its proper position. 02352 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 02353 DAG.getConstant(0, WideVT), BitShift); 02354 02355 // Construct the ATOMIC_CMP_SWAPW node.
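// The resulting pseudo is expanded by emitAtomicCmpSwapW below into a
// CS loop that compares and replaces only the BitSize-wide field of
// the containing word.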
02356 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 02357 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 02358 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 02359 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 02360 VTList, Ops, NarrowVT, MMO); 02361 return AtomicOp; 02362 } 02363 02364 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 02365 SelectionDAG &DAG) const { 02366 MachineFunction &MF = DAG.getMachineFunction(); 02367 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 02368 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 02369 SystemZ::R15D, Op.getValueType()); 02370 } 02371 02372 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 02373 SelectionDAG &DAG) const { 02374 MachineFunction &MF = DAG.getMachineFunction(); 02375 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 02376 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 02377 SystemZ::R15D, Op.getOperand(1)); 02378 } 02379 02380 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 02381 SelectionDAG &DAG) const { 02382 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 02383 if (!IsData) 02384 // Just preserve the chain. 02385 return Op.getOperand(0); 02386 02387 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 02388 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 02389 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 02390 SDValue Ops[] = { 02391 Op.getOperand(0), 02392 DAG.getConstant(Code, MVT::i32), 02393 Op.getOperand(1) 02394 }; 02395 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 02396 Node->getVTList(), Ops, 02397 Node->getMemoryVT(), Node->getMemOperand()); 02398 } 02399 02400 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 02401 SelectionDAG &DAG) const { 02402 switch (Op.getOpcode()) { 02403 case ISD::BR_CC: 02404 return lowerBR_CC(Op, DAG); 02405 case ISD::SELECT_CC: 02406 return lowerSELECT_CC(Op, DAG); 02407 case ISD::SETCC: 02408 return lowerSETCC(Op, DAG); 02409 case ISD::GlobalAddress: 02410 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 02411 case ISD::GlobalTLSAddress: 02412 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 02413 case ISD::BlockAddress: 02414 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 02415 case ISD::JumpTable: 02416 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 02417 case ISD::ConstantPool: 02418 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 02419 case ISD::BITCAST: 02420 return lowerBITCAST(Op, DAG); 02421 case ISD::VASTART: 02422 return lowerVASTART(Op, DAG); 02423 case ISD::VACOPY: 02424 return lowerVACOPY(Op, DAG); 02425 case ISD::DYNAMIC_STACKALLOC: 02426 return lowerDYNAMIC_STACKALLOC(Op, DAG); 02427 case ISD::SMUL_LOHI: 02428 return lowerSMUL_LOHI(Op, DAG); 02429 case ISD::UMUL_LOHI: 02430 return lowerUMUL_LOHI(Op, DAG); 02431 case ISD::SDIVREM: 02432 return lowerSDIVREM(Op, DAG); 02433 case ISD::UDIVREM: 02434 return lowerUDIVREM(Op, DAG); 02435 case ISD::OR: 02436 return lowerOR(Op, DAG); 02437 case ISD::ATOMIC_SWAP: 02438 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 02439 case ISD::ATOMIC_STORE: 02440 return lowerATOMIC_STORE(Op, DAG); 02441 case ISD::ATOMIC_LOAD: 02442 return lowerATOMIC_LOAD(Op, DAG); 02443 case ISD::ATOMIC_LOAD_ADD: 02444 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 02445 case ISD::ATOMIC_LOAD_SUB: 02446 return lowerATOMIC_LOAD_SUB(Op, 
DAG); 02447 case ISD::ATOMIC_LOAD_AND: 02448 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 02449 case ISD::ATOMIC_LOAD_OR: 02450 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 02451 case ISD::ATOMIC_LOAD_XOR: 02452 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 02453 case ISD::ATOMIC_LOAD_NAND: 02454 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 02455 case ISD::ATOMIC_LOAD_MIN: 02456 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 02457 case ISD::ATOMIC_LOAD_MAX: 02458 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 02459 case ISD::ATOMIC_LOAD_UMIN: 02460 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 02461 case ISD::ATOMIC_LOAD_UMAX: 02462 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 02463 case ISD::ATOMIC_CMP_SWAP: 02464 return lowerATOMIC_CMP_SWAP(Op, DAG); 02465 case ISD::STACKSAVE: 02466 return lowerSTACKSAVE(Op, DAG); 02467 case ISD::STACKRESTORE: 02468 return lowerSTACKRESTORE(Op, DAG); 02469 case ISD::PREFETCH: 02470 return lowerPREFETCH(Op, DAG); 02471 default: 02472 llvm_unreachable("Unexpected node to lower"); 02473 } 02474 } 02475 02476 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 02477 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 02478 switch (Opcode) { 02479 OPCODE(RET_FLAG); 02480 OPCODE(CALL); 02481 OPCODE(SIBCALL); 02482 OPCODE(PCREL_WRAPPER); 02483 OPCODE(PCREL_OFFSET); 02484 OPCODE(IABS); 02485 OPCODE(ICMP); 02486 OPCODE(FCMP); 02487 OPCODE(TM); 02488 OPCODE(BR_CCMASK); 02489 OPCODE(SELECT_CCMASK); 02490 OPCODE(ADJDYNALLOC); 02491 OPCODE(EXTRACT_ACCESS); 02492 OPCODE(UMUL_LOHI64); 02493 OPCODE(SDIVREM64); 02494 OPCODE(UDIVREM32); 02495 OPCODE(UDIVREM64); 02496 OPCODE(MVC); 02497 OPCODE(MVC_LOOP); 02498 OPCODE(NC); 02499 OPCODE(NC_LOOP); 02500 OPCODE(OC); 02501 OPCODE(OC_LOOP); 02502 OPCODE(XC); 02503 OPCODE(XC_LOOP); 02504 OPCODE(CLC); 02505 OPCODE(CLC_LOOP); 02506 OPCODE(STRCMP); 02507 OPCODE(STPCPY); 02508 OPCODE(SEARCH_STRING); 02509 OPCODE(IPM); 02510 OPCODE(SERIALIZE); 02511 OPCODE(ATOMIC_SWAPW); 02512 OPCODE(ATOMIC_LOADW_ADD); 02513 OPCODE(ATOMIC_LOADW_SUB); 02514 OPCODE(ATOMIC_LOADW_AND); 02515 OPCODE(ATOMIC_LOADW_OR); 02516 OPCODE(ATOMIC_LOADW_XOR); 02517 OPCODE(ATOMIC_LOADW_NAND); 02518 OPCODE(ATOMIC_LOADW_MIN); 02519 OPCODE(ATOMIC_LOADW_MAX); 02520 OPCODE(ATOMIC_LOADW_UMIN); 02521 OPCODE(ATOMIC_LOADW_UMAX); 02522 OPCODE(ATOMIC_CMP_SWAPW); 02523 OPCODE(PREFETCH); 02524 } 02525 return nullptr; 02526 #undef OPCODE 02527 } 02528 02529 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 02530 DAGCombinerInfo &DCI) const { 02531 SelectionDAG &DAG = DCI.DAG; 02532 unsigned Opcode = N->getOpcode(); 02533 if (Opcode == ISD::SIGN_EXTEND) { 02534 // Convert (sext (ashr (shl X, C1), C2)) to 02535 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as 02536 // cheap as narrower ones. 
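// E.g. (a sketch) for the i32-to-i64 sign extension of
// (sra (shl X, 24), 24), Extra is 32, so the combine below produces
// (sra (shl (anyext X), 56), 56): one pair of 64-bit shifts that also
// performs the extension.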
02537 SDValue N0 = N->getOperand(0); 02538 EVT VT = N->getValueType(0); 02539 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 02540 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 02541 SDValue Inner = N0.getOperand(0); 02542 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 02543 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 02544 unsigned Extra = (VT.getSizeInBits() - 02545 N0.getValueType().getSizeInBits()); 02546 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 02547 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 02548 EVT ShiftVT = N0.getOperand(1).getValueType(); 02549 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 02550 Inner.getOperand(0)); 02551 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 02552 DAG.getConstant(NewShlAmt, ShiftVT)); 02553 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 02554 DAG.getConstant(NewSraAmt, ShiftVT)); 02555 } 02556 } 02557 } 02558 } 02559 return SDValue(); 02560 } 02561 02562 //===----------------------------------------------------------------------===// 02563 // Custom insertion 02564 //===----------------------------------------------------------------------===// 02565 02566 // Create a new basic block after MBB. 02567 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 02568 MachineFunction &MF = *MBB->getParent(); 02569 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 02570 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 02571 return NewMBB; 02572 } 02573 02574 // Split MBB after MI and return the new block (the one that contains 02575 // instructions after MI). 02576 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 02577 MachineBasicBlock *MBB) { 02578 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 02579 NewMBB->splice(NewMBB->begin(), MBB, 02580 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 02581 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 02582 return NewMBB; 02583 } 02584 02585 // Split MBB before MI and return the new block (the one that contains MI). 02586 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, 02587 MachineBasicBlock *MBB) { 02588 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 02589 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 02590 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 02591 return NewMBB; 02592 } 02593 02594 // Force base value Base into a register before MI. Return the register. 02595 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, 02596 const SystemZInstrInfo *TII) { 02597 if (Base.isReg()) 02598 return Base.getReg(); 02599 02600 MachineBasicBlock *MBB = MI->getParent(); 02601 MachineFunction &MF = *MBB->getParent(); 02602 MachineRegisterInfo &MRI = MF.getRegInfo(); 02603 02604 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 02605 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) 02606 .addOperand(Base).addImm(0).addReg(0); 02607 return Reg; 02608 } 02609 02610 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
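// The expansion is a triangle (a sketch):
//
//   StartMBB ---(BRC CCMask)---> JoinMBB
//       \                          ^
//        `-> FalseMBB -------------'
//
// with a PHI in JoinMBB choosing TrueReg (from StartMBB) or FalseReg
// (from FalseMBB).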
02611 MachineBasicBlock * 02612 SystemZTargetLowering::emitSelect(MachineInstr *MI, 02613 MachineBasicBlock *MBB) const { 02614 const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>( 02615 MBB->getParent()->getSubtarget().getInstrInfo()); 02616 02617 unsigned DestReg = MI->getOperand(0).getReg(); 02618 unsigned TrueReg = MI->getOperand(1).getReg(); 02619 unsigned FalseReg = MI->getOperand(2).getReg(); 02620 unsigned CCValid = MI->getOperand(3).getImm(); 02621 unsigned CCMask = MI->getOperand(4).getImm(); 02622 DebugLoc DL = MI->getDebugLoc(); 02623 02624 MachineBasicBlock *StartMBB = MBB; 02625 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 02626 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 02627 02628 // StartMBB: 02629 // BRC CCMask, JoinMBB 02630 // # fallthrough to FalseMBB 02631 MBB = StartMBB; 02632 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 02633 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 02634 MBB->addSuccessor(JoinMBB); 02635 MBB->addSuccessor(FalseMBB); 02636 02637 // FalseMBB: 02638 // # fallthrough to JoinMBB 02639 MBB = FalseMBB; 02640 MBB->addSuccessor(JoinMBB); 02641 02642 // JoinMBB: 02643 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 02644 // ... 02645 MBB = JoinMBB; 02646 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 02647 .addReg(TrueReg).addMBB(StartMBB) 02648 .addReg(FalseReg).addMBB(FalseMBB); 02649 02650 MI->eraseFromParent(); 02651 return JoinMBB; 02652 } 02653 02654 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 02655 // StoreOpcode is the store to use and Invert says whether the store should 02656 // happen when the condition is false rather than true. If a STORE ON 02657 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 02658 MachineBasicBlock * 02659 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 02660 MachineBasicBlock *MBB, 02661 unsigned StoreOpcode, unsigned STOCOpcode, 02662 bool Invert) const { 02663 const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>( 02664 MBB->getParent()->getSubtarget().getInstrInfo()); 02665 02666 unsigned SrcReg = MI->getOperand(0).getReg(); 02667 MachineOperand Base = MI->getOperand(1); 02668 int64_t Disp = MI->getOperand(2).getImm(); 02669 unsigned IndexReg = MI->getOperand(3).getReg(); 02670 unsigned CCValid = MI->getOperand(4).getImm(); 02671 unsigned CCMask = MI->getOperand(5).getImm(); 02672 DebugLoc DL = MI->getDebugLoc(); 02673 02674 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 02675 02676 // Use STOCOpcode if possible. We could use different store patterns in 02677 // order to avoid matching the index register, but the performance trade-offs 02678 // might be more complicated in that case. 02679 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { 02680 if (Invert) 02681 CCMask ^= CCValid; 02682 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 02683 .addReg(SrcReg).addOperand(Base).addImm(Disp) 02684 .addImm(CCValid).addImm(CCMask); 02685 MI->eraseFromParent(); 02686 return MBB; 02687 } 02688 02689 // Get the condition needed to branch around the store. 
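// The BRC emitted below branches to JoinMBB when the store must NOT
// happen, so for a normal (non-inverted) store the mask is flipped
// here; for Invert it is already the "skip the store" condition.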
02690 if (!Invert) 02691 CCMask ^= CCValid; 02692 02693 MachineBasicBlock *StartMBB = MBB; 02694 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 02695 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 02696 02697 // StartMBB: 02698 // BRC CCMask, JoinMBB 02699 // # fallthrough to FalseMBB 02700 MBB = StartMBB; 02701 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 02702 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 02703 MBB->addSuccessor(JoinMBB); 02704 MBB->addSuccessor(FalseMBB); 02705 02706 // FalseMBB: 02707 // store %SrcReg, %Disp(%Index,%Base) 02708 // # fallthrough to JoinMBB 02709 MBB = FalseMBB; 02710 BuildMI(MBB, DL, TII->get(StoreOpcode)) 02711 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 02712 MBB->addSuccessor(JoinMBB); 02713 02714 MI->eraseFromParent(); 02715 return JoinMBB; 02716 } 02717 02718 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 02719 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 02720 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 02721 // BitSize is the width of the field in bits, or 0 if this is a partword 02722 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 02723 // is one of the operands. Invert says whether the field should be 02724 // inverted after performing BinOpcode (e.g. for NAND). 02725 MachineBasicBlock * 02726 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 02727 MachineBasicBlock *MBB, 02728 unsigned BinOpcode, 02729 unsigned BitSize, 02730 bool Invert) const { 02731 MachineFunction &MF = *MBB->getParent(); 02732 const SystemZInstrInfo *TII = 02733 static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo()); 02734 MachineRegisterInfo &MRI = MF.getRegInfo(); 02735 bool IsSubWord = (BitSize < 32); 02736 02737 // Extract the operands. Base can be a register or a frame index. 02738 // Src2 can be a register or immediate. 02739 unsigned Dest = MI->getOperand(0).getReg(); 02740 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 02741 int64_t Disp = MI->getOperand(2).getImm(); 02742 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 02743 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 02744 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 02745 DebugLoc DL = MI->getDebugLoc(); 02746 if (IsSubWord) 02747 BitSize = MI->getOperand(6).getImm(); 02748 02749 // Subword operations use 32-bit registers. 02750 const TargetRegisterClass *RC = (BitSize <= 32 ? 02751 &SystemZ::GR32BitRegClass : 02752 &SystemZ::GR64BitRegClass); 02753 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 02754 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 02755 02756 // Get the right opcodes for the displacement. 02757 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 02758 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 02759 assert(LOpcode && CSOpcode && "Displacement out of range"); 02760 02761 // Create virtual registers for temporary results. 02762 unsigned OrigVal = MRI.createVirtualRegister(RC); 02763 unsigned OldVal = MRI.createVirtualRegister(RC); 02764 unsigned NewVal = (BinOpcode || IsSubWord ? 02765 MRI.createVirtualRegister(RC) : Src2.getReg()); 02766 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 02767 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 02768 02769 // Insert a basic block for the main loop. 
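// In the loop built below, a failing CS writes the current memory
// value into %Dest, so a retry re-enters LoopMBB through the PHI
// without an explicit reload of the word.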
02770 MachineBasicBlock *StartMBB = MBB; 02771 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 02772 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 02773 02774 // StartMBB: 02775 // ... 02776 // %OrigVal = L Disp(%Base) 02777 // # fall through to LoopMBB 02778 MBB = StartMBB; 02779 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 02780 .addOperand(Base).addImm(Disp).addReg(0); 02781 MBB->addSuccessor(LoopMBB); 02782 02783 // LoopMBB: 02784 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 02785 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 02786 // %RotatedNewVal = OP %RotatedOldVal, %Src2 02787 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 02788 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 02789 // JNE LoopMBB 02790 // # fall through to DoneMBB 02791 MBB = LoopMBB; 02792 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 02793 .addReg(OrigVal).addMBB(StartMBB) 02794 .addReg(Dest).addMBB(LoopMBB); 02795 if (IsSubWord) 02796 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 02797 .addReg(OldVal).addReg(BitShift).addImm(0); 02798 if (Invert) { 02799 // Perform the operation normally and then invert every bit of the field. 02800 unsigned Tmp = MRI.createVirtualRegister(RC); 02801 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 02802 .addReg(RotatedOldVal).addOperand(Src2); 02803 if (BitSize <= 32) 02804 // XILF with the upper BitSize bits set. 02805 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) 02806 .addReg(Tmp).addImm(-1U << (32 - BitSize)); 02807 else { 02808 // Use LCGR and add -1 to the result, which is more compact than 02809 // an XILF, XILH pair. 02810 unsigned Tmp2 = MRI.createVirtualRegister(RC); 02811 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 02812 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 02813 .addReg(Tmp2).addImm(-1); 02814 } 02815 } else if (BinOpcode) 02816 // A simple binary operation. 02817 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 02818 .addReg(RotatedOldVal).addOperand(Src2); 02819 else if (IsSubWord) 02820 // Use RISBG to rotate Src2 into position and use it to replace the 02821 // field in RotatedOldVal. 02822 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 02823 .addReg(RotatedOldVal).addReg(Src2.getReg()) 02824 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 02825 if (IsSubWord) 02826 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 02827 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 02828 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 02829 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 02830 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 02831 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 02832 MBB->addSuccessor(LoopMBB); 02833 MBB->addSuccessor(DoneMBB); 02834 02835 MI->eraseFromParent(); 02836 return DoneMBB; 02837 } 02838 02839 // Implement EmitInstrWithCustomInserter for pseudo 02840 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 02841 // instruction that should be used to compare the current field with the 02842 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 02843 // for when the current field should be kept. BitSize is the width of 02844 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
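// E.g. (a sketch) a fullword signed minimum would use CompareOpcode =
// SystemZ::CR with KeepOldMask = SystemZ::CCMASK_CMP_LE: the old value
// is kept whenever old <= new, otherwise the new value is swapped in
// via UseAltMBB.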
02839 // Implement EmitInstrWithCustomInserter for pseudo
02840 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
02841 // instruction that should be used to compare the current field with the
02842 // minimum or maximum value.  KeepOldMask is the BRC condition-code mask
02843 // for when the current field should be kept.  BitSize is the width of
02844 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
02845 MachineBasicBlock *
02846 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
02847                                             MachineBasicBlock *MBB,
02848                                             unsigned CompareOpcode,
02849                                             unsigned KeepOldMask,
02850                                             unsigned BitSize) const {
02851   MachineFunction &MF = *MBB->getParent();
02852   const SystemZInstrInfo *TII =
02853       static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
02854   MachineRegisterInfo &MRI = MF.getRegInfo();
02855   bool IsSubWord = (BitSize < 32);
02856 
02857   // Extract the operands.  Base can be a register or a frame index.
02858   unsigned Dest = MI->getOperand(0).getReg();
02859   MachineOperand Base = earlyUseOperand(MI->getOperand(1));
02860   int64_t Disp = MI->getOperand(2).getImm();
02861   unsigned Src2 = MI->getOperand(3).getReg();
02862   unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
02863   unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
02864   DebugLoc DL = MI->getDebugLoc();
02865   if (IsSubWord)
02866     BitSize = MI->getOperand(6).getImm();
02867 
02868   // Subword operations use 32-bit registers.
02869   const TargetRegisterClass *RC = (BitSize <= 32 ?
02870                                    &SystemZ::GR32BitRegClass :
02871                                    &SystemZ::GR64BitRegClass);
02872   unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
02873   unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
02874 
02875   // Get the right opcodes for the displacement.
02876   LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
02877   CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
02878   assert(LOpcode && CSOpcode && "Displacement out of range");
02879 
02880   // Create virtual registers for temporary results.
02881   unsigned OrigVal       = MRI.createVirtualRegister(RC);
02882   unsigned OldVal        = MRI.createVirtualRegister(RC);
02883   unsigned NewVal        = MRI.createVirtualRegister(RC);
02884   unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
02885   unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
02886   unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
02887 
02888   // Insert 3 basic blocks for the loop.
02889   MachineBasicBlock *StartMBB  = MBB;
02890   MachineBasicBlock *DoneMBB   = splitBlockBefore(MI, MBB);
02891   MachineBasicBlock *LoopMBB   = emitBlockAfter(StartMBB);
02892   MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
02893   MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
02894 
02895   //  StartMBB:
02896   //   ...
02897   //   %OrigVal     = L Disp(%Base)
02898   //   # fall through to LoopMBB
02899   MBB = StartMBB;
02900   BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
02901     .addOperand(Base).addImm(Disp).addReg(0);
02902   MBB->addSuccessor(LoopMBB);
02903 
02904   //  LoopMBB:
02905   //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
02906   //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
02907   //   CompareOpcode %RotatedOldVal, %Src2
02908   //   BRC KeepOldMask, UpdateMBB
02909   MBB = LoopMBB;
02910   BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
02911     .addReg(OrigVal).addMBB(StartMBB)
02912     .addReg(Dest).addMBB(UpdateMBB);
02913   if (IsSubWord)
02914     BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
02915       .addReg(OldVal).addReg(BitShift).addImm(0);
02916   BuildMI(MBB, DL, TII->get(CompareOpcode))
02917     .addReg(RotatedOldVal).addReg(Src2);
02918   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
02919     .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
02920   MBB->addSuccessor(UpdateMBB);
02921   MBB->addSuccessor(UseAltMBB);
02922 
02923   //  UseAltMBB:
02924   //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
02925   //   # fall through to UpdateMBB
02926   MBB = UseAltMBB;
02927   if (IsSubWord)
02928     BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
02929       .addReg(RotatedOldVal).addReg(Src2)
02930       .addImm(32).addImm(31 + BitSize).addImm(0);
02931   MBB->addSuccessor(UpdateMBB);
02932 
02933   //  UpdateMBB:
02934   //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
02935   //                        [ %RotatedAltVal, UseAltMBB ]
02936   //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
02937   //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
02938   //   JNE LoopMBB
02939   //   # fall through to DoneMBB
02940   MBB = UpdateMBB;
02941   BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
02942     .addReg(RotatedOldVal).addMBB(LoopMBB)
02943     .addReg(RotatedAltVal).addMBB(UseAltMBB);
02944   if (IsSubWord)
02945     BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
02946       .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
02947   BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
02948     .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
02949   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
02950     .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
02951   MBB->addSuccessor(LoopMBB);
02952   MBB->addSuccessor(DoneMBB);
02953 
02954   MI->eraseFromParent();
02955   return DoneMBB;
02956 }
02957 
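The min/max expansion keeps the old field when the comparison already satisfies KeepOldMask, and otherwise substitutes Src2 via UseAltMBB; either way the chosen value is published through the same CS retry loop. A minimal sketch of the equivalent logic for a full-word signed minimum, with std::atomic standing in for CS (fetchMin32 is an illustrative name, not part of this file):

#include <atomic>
#include <cstdint>

// Host-side analogue of ATOMIC_LOAD_MIN_32: keep the old value if it is
// already <= Src2 (the KeepOldMask case), otherwise try to store Src2.
static int32_t fetchMin32(std::atomic<int32_t> &Word, int32_t Src2) {
  int32_t OldVal = Word.load();
  for (;;) {
    // CR %OldVal, %Src2; BRC CCMASK_CMP_LE skips the substitution.
    int32_t NewVal = (OldVal <= Src2) ? OldVal : Src2;
    // CS publishes NewVal; JNE loops back if the word changed meanwhile.
    if (Word.compare_exchange_weak(OldVal, NewVal))
      return OldVal;
  }
}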
02958 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
02959 // instruction MI.
02960 MachineBasicBlock *
02961 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
02962                                           MachineBasicBlock *MBB) const {
02963   MachineFunction &MF = *MBB->getParent();
02964   const SystemZInstrInfo *TII =
02965       static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
02966   MachineRegisterInfo &MRI = MF.getRegInfo();
02967 
02968   // Extract the operands.  Base can be a register or a frame index.
02969   unsigned Dest = MI->getOperand(0).getReg();
02970   MachineOperand Base = earlyUseOperand(MI->getOperand(1));
02971   int64_t Disp = MI->getOperand(2).getImm();
02972   unsigned OrigCmpVal = MI->getOperand(3).getReg();
02973   unsigned OrigSwapVal = MI->getOperand(4).getReg();
02974   unsigned BitShift = MI->getOperand(5).getReg();
02975   unsigned NegBitShift = MI->getOperand(6).getReg();
02976   int64_t BitSize = MI->getOperand(7).getImm();
02977   DebugLoc DL = MI->getDebugLoc();
02978 
02979   const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
02980 
02981   // Get the right opcodes for the displacement.
02982   unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
02983   unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
02984   assert(LOpcode && CSOpcode && "Displacement out of range");
02985 
02986   // Create virtual registers for temporary results.
02987   unsigned OrigOldVal   = MRI.createVirtualRegister(RC);
02988   unsigned OldVal       = MRI.createVirtualRegister(RC);
02989   unsigned CmpVal       = MRI.createVirtualRegister(RC);
02990   unsigned SwapVal      = MRI.createVirtualRegister(RC);
02991   unsigned StoreVal     = MRI.createVirtualRegister(RC);
02992   unsigned RetryOldVal  = MRI.createVirtualRegister(RC);
02993   unsigned RetryCmpVal  = MRI.createVirtualRegister(RC);
02994   unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
02995 
02996   // Insert 2 basic blocks for the loop.
02997   MachineBasicBlock *StartMBB = MBB;
02998   MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
02999   MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);
03000   MachineBasicBlock *SetMBB   = emitBlockAfter(LoopMBB);
03001 
03002   //  StartMBB:
03003   //   ...
03004   //   %OrigOldVal = L Disp(%Base)
03005   //   # fall through to LoopMBB
03006   MBB = StartMBB;
03007   BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
03008     .addOperand(Base).addImm(Disp).addReg(0);
03009   MBB->addSuccessor(LoopMBB);
03010 
03011   //  LoopMBB:
03012   //   %OldVal  = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
03013   //   %CmpVal  = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
03014   //   %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
03015   //   %Dest = RLL %OldVal, BitSize(%BitShift)
03016   //                      ^^ The low BitSize bits contain the field
03017   //                         of interest.
03018   //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
03019   //                      ^^ Replace the upper 32-BitSize bits of the
03020   //                         comparison value with those that we loaded,
03021   //                         so that we can use a full word comparison.
03022   //   CR %Dest, %RetryCmpVal
03023   //   JNE DoneMBB
03024   //   # fall through to SetMBB
03025   MBB = LoopMBB;
03026   BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
03027     .addReg(OrigOldVal).addMBB(StartMBB)
03028     .addReg(RetryOldVal).addMBB(SetMBB);
03029   BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
03030     .addReg(OrigCmpVal).addMBB(StartMBB)
03031     .addReg(RetryCmpVal).addMBB(SetMBB);
03032   BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
03033     .addReg(OrigSwapVal).addMBB(StartMBB)
03034     .addReg(RetrySwapVal).addMBB(SetMBB);
03035   BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
03036     .addReg(OldVal).addReg(BitShift).addImm(BitSize);
03037   BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
03038     .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
03039   BuildMI(MBB, DL, TII->get(SystemZ::CR))
03040     .addReg(Dest).addReg(RetryCmpVal);
03041   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
03042     .addImm(SystemZ::CCMASK_ICMP)
03043     .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
03044   MBB->addSuccessor(DoneMBB);
03045   MBB->addSuccessor(SetMBB);
03046 
03047   //  SetMBB:
03048   //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
03049   //                      ^^ Replace the upper 32-BitSize bits of the new
03050   //                         value with those that we loaded.
03051   //   %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
03052   //                      ^^ Rotate the new field to its proper position.
03053   //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
03054   //   JNE LoopMBB
03055   //   # fall through to DoneMBB
03056   MBB = SetMBB;
03057   BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
03058     .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
03059   BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
03060     .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
03061   BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
03062     .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
03063   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
03064     .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
03065   MBB->addSuccessor(LoopMBB);
03066   MBB->addSuccessor(DoneMBB);
03067 
03068   MI->eraseFromParent();
03069   return DoneMBB;
03070 }
03071 
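In effect, the partword compare-and-swap widens both the expected and replacement values to the full containing word before each CS, splicing in the bits that were actually loaded so that the word-sized comparison cannot fail spuriously on the neighbouring bytes. A rough std::atomic rendering of that idea for an 8-bit field in the low byte of an aligned word; cmpxchg8 is an illustrative name, and the bool result is a simplification (the real pseudo also returns the loaded field in %Dest):

#include <atomic>
#include <cstdint>

// Host-side analogue of ATOMIC_CMP_SWAPW.  The upper 24 bits of the
// expected and new values are refreshed from the last value seen,
// mirroring the RISBG32 splices above.
static bool cmpxchg8(std::atomic<uint32_t> &Word, uint8_t Cmp, uint8_t Swap) {
  uint32_t OldVal = Word.load();
  for (;;) {
    uint32_t Expected = (OldVal & ~0xffu) | Cmp;   // RISBG32 on CmpVal
    uint32_t NewVal   = (OldVal & ~0xffu) | Swap;  // RISBG32 on SwapVal
    if ((OldVal & 0xffu) != Cmp)
      return false;                                // CR + JNE DoneMBB
    if (Word.compare_exchange_weak(Expected, NewVal))
      return true;                                 // CS succeeded
    OldVal = Expected;                             // retry with fresh word
  }
}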
03072 // Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
03073 // if the high register of the GR128 value must be cleared or false if
03074 // it's "don't care".  SubReg is subreg_l32 when extending a GR32
03075 // and subreg_l64 when extending a GR64.
03076 MachineBasicBlock *
03077 SystemZTargetLowering::emitExt128(MachineInstr *MI,
03078                                   MachineBasicBlock *MBB,
03079                                   bool ClearEven, unsigned SubReg) const {
03080   MachineFunction &MF = *MBB->getParent();
03081   const SystemZInstrInfo *TII =
03082       static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
03083   MachineRegisterInfo &MRI = MF.getRegInfo();
03084   DebugLoc DL = MI->getDebugLoc();
03085 
03086   unsigned Dest = MI->getOperand(0).getReg();
03087   unsigned Src = MI->getOperand(1).getReg();
03088   unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
03089 
03090   BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
03091   if (ClearEven) {
03092     unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
03093     unsigned Zero64   = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
03094 
03095     BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
03096       .addImm(0);
03097     BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
03098       .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
03099     In128 = NewIn128;
03100   }
03101   BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
03102     .addReg(In128).addReg(Src).addImm(SubReg);
03103 
03104   MI->eraseFromParent();
03105   return MBB;
03106 }
03107 
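The GR128 value lives in an even/odd register pair, and the INSERT_SUBREG chain above builds that pair without any data movement for the "don't care" half. Modelled on plain integers, under the assumption that the high half maps to subreg_h64 and the low half to subreg_l64 (GR128 and zext128 below are illustrative names, not part of this file):

#include <cstdint>

// Illustrative stand-in for an even/odd GR128 register pair.
struct GR128 {
  uint64_t High; // even register (subreg_h64)
  uint64_t Low;  // odd register (subreg_l64)
};

// ZEXT128_64: ClearEven forces the high register to zero (LLILL 0 +
// INSERT_SUBREG); the source lands in the low half (SubReg == subreg_l64).
static GR128 zext128(uint64_t Src) {
  GR128 In128;      // IMPLICIT_DEF: contents start out undefined
  In128.High = 0;   // cleared because ClearEven is true
  In128.Low = Src;  // INSERT_SUBREG of the source into subreg_l64
  return In128;
}

For AEXT128_64 the high half is left as IMPLICIT_DEF, since the consumer (e.g. a 64-bit multiply or divide) overwrites it anyway.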
03108 MachineBasicBlock *
03109 SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
03110                                          MachineBasicBlock *MBB,
03111                                          unsigned Opcode) const {
03112   MachineFunction &MF = *MBB->getParent();
03113   const SystemZInstrInfo *TII =
03114       static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
03115   MachineRegisterInfo &MRI = MF.getRegInfo();
03116   DebugLoc DL = MI->getDebugLoc();
03117 
03118   MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
03119   uint64_t DestDisp = MI->getOperand(1).getImm();
03120   MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
03121   uint64_t SrcDisp = MI->getOperand(3).getImm();
03122   uint64_t Length = MI->getOperand(4).getImm();
03123 
03124   // When generating more than one CLC, all but the last will need to
03125   // branch to the end when a difference is found.
03126   MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
03127                                splitBlockAfter(MI, MBB) : nullptr);
03128 
03129   // Check for the loop form, in which operand 5 is the trip count.
03130   if (MI->getNumExplicitOperands() > 5) {
03131     bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
03132 
03133     uint64_t StartCountReg = MI->getOperand(5).getReg();
03134     uint64_t StartSrcReg   = forceReg(MI, SrcBase, TII);
03135     uint64_t StartDestReg  = (HaveSingleBase ? StartSrcReg :
03136                               forceReg(MI, DestBase, TII));
03137 
03138     const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
03139     uint64_t ThisSrcReg  = MRI.createVirtualRegister(RC);
03140     uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
03141                             MRI.createVirtualRegister(RC));
03142     uint64_t NextSrcReg  = MRI.createVirtualRegister(RC);
03143     uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
03144                             MRI.createVirtualRegister(RC));
03145 
03146     RC = &SystemZ::GR64BitRegClass;
03147     uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
03148     uint64_t NextCountReg = MRI.createVirtualRegister(RC);
03149 
03150     MachineBasicBlock *StartMBB = MBB;
03151     MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
03152     MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
03153     MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
03154 
03155     //  StartMBB:
03156     //   # fall through to LoopMBB
03157     MBB->addSuccessor(LoopMBB);
03158 
03159     //  LoopMBB:
03160     //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
03161     //                      [ %NextDestReg, NextMBB ]
03162     //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
03163     //                     [ %NextSrcReg, NextMBB ]
03164     //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
03165     //                       [ %NextCountReg, NextMBB ]
03166     //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
03167     //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
03168     //   ( JLH EndMBB )
03169     //
03170     // The prefetch is used only for MVC.  The JLH is used only for CLC.
03171     MBB = LoopMBB;
03172 
03173     BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
03174       .addReg(StartDestReg).addMBB(StartMBB)
03175       .addReg(NextDestReg).addMBB(NextMBB);
03176     if (!HaveSingleBase)
03177       BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
03178         .addReg(StartSrcReg).addMBB(StartMBB)
03179         .addReg(NextSrcReg).addMBB(NextMBB);
03180     BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
03181       .addReg(StartCountReg).addMBB(StartMBB)
03182       .addReg(NextCountReg).addMBB(NextMBB);
03183     if (Opcode == SystemZ::MVC)
03184       BuildMI(MBB, DL, TII->get(SystemZ::PFD))
03185         .addImm(SystemZ::PFD_WRITE)
03186         .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
03187     BuildMI(MBB, DL, TII->get(Opcode))
03188       .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
03189       .addReg(ThisSrcReg).addImm(SrcDisp);
03190     if (EndMBB) {
03191       BuildMI(MBB, DL, TII->get(SystemZ::BRC))
03192         .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
03193         .addMBB(EndMBB);
03194       MBB->addSuccessor(EndMBB);
03195       MBB->addSuccessor(NextMBB);
03196     }
03197 
03198     //  NextMBB:
03199     //   %NextDestReg = LA 256(%ThisDestReg)
03200     //   %NextSrcReg = LA 256(%ThisSrcReg)
03201     //   %NextCountReg = AGHI %ThisCountReg, -1
03202     //   CGHI %NextCountReg, 0
03203     //   JLH LoopMBB
03204     //   # fall through to DoneMBB
03205     //
03206     // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
03207     MBB = NextMBB;
03208 
03209     BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
03210       .addReg(ThisDestReg).addImm(256).addReg(0);
03211     if (!HaveSingleBase)
03212       BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
03213         .addReg(ThisSrcReg).addImm(256).addReg(0);
03214     BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
03215       .addReg(ThisCountReg).addImm(-1);
03216     BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
03217       .addReg(NextCountReg).addImm(0);
03218     BuildMI(MBB, DL, TII->get(SystemZ::BRC))
03219       .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
03220       .addMBB(LoopMBB);
03221     MBB->addSuccessor(LoopMBB);
03222     MBB->addSuccessor(DoneMBB);
03223 
03224     DestBase = MachineOperand::CreateReg(NextDestReg, false);
03225     SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
03226     Length &= 255;
03227     MBB = DoneMBB;
03228   }
03229   // Handle any remaining bytes with straight-line code.
03230   while (Length > 0) {
03231     uint64_t ThisLength = std::min(Length, uint64_t(256));
03232     // The previous iteration might have created out-of-range displacements.
03233     // Apply them using LAY if so.
03234     if (!isUInt<12>(DestDisp)) {
03235       unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
03236       BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
03237         .addOperand(DestBase).addImm(DestDisp).addReg(0);
03238       DestBase = MachineOperand::CreateReg(Reg, false);
03239       DestDisp = 0;
03240     }
03241     if (!isUInt<12>(SrcDisp)) {
03242       unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
03243       BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
03244         .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
03245       SrcBase = MachineOperand::CreateReg(Reg, false);
03246       SrcDisp = 0;
03247     }
03248     BuildMI(*MBB, MI, DL, TII->get(Opcode))
03249       .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
03250       .addOperand(SrcBase).addImm(SrcDisp);
03251     DestDisp += ThisLength;
03252     SrcDisp += ThisLength;
03253     Length -= ThisLength;
03254     // If there's another CLC to go, branch to the end if a difference
03255     // was found.
03256     if (EndMBB && Length > 0) {
03257       MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
03258       BuildMI(MBB, DL, TII->get(SystemZ::BRC))
03259         .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
03260         .addMBB(EndMBB);
03261       MBB->addSuccessor(EndMBB);
03262       MBB->addSuccessor(NextMBB);
03263       MBB = NextMBB;
03264     }
03265   }
03266   if (EndMBB) {
03267     MBB->addSuccessor(EndMBB);
03268     MBB = EndMBB;
03269     MBB->addLiveIn(SystemZ::CC);
03270   }
03271 
03272   MI->eraseFromParent();
03273   return MBB;
03274 }
03275 
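MVC, CLC and the other memory-to-memory instructions handle at most 256 bytes each, so longer operations are split into a counted loop of 256-byte blocks plus a straight-line tail, with the displacements advancing by 256 per block until LAY has to rebase them. The same chunking arithmetic in ordinary C++, for the MVC case only (copyChunks is an illustrative name, not part of this file):

#include <cstdint>
#include <cstring>

// Host-side analogue of the MVC expansion: a loop of 256-byte blocks
// (the loop form, whose trip count is operand 5) followed by at most
// one shorter block (the straight-line tail).
static void copyChunks(char *Dest, const char *Src, uint64_t Length) {
  uint64_t TripCount = Length / 256;     // precomputed by the caller
  for (uint64_t I = 0; I < TripCount; ++I) {
    std::memcpy(Dest, Src, 256);         // MVC DestDisp(256,...), SrcDisp(...)
    Dest += 256;                         // LA 256(%ThisDestReg)
    Src += 256;                          // LA 256(%ThisSrcReg)
  }
  Length &= 255;                         // remaining bytes, as above
  if (Length > 0)
    std::memcpy(Dest, Src, Length);      // final shorter MVC
}

For CLC the structure is the same, except that every block but the last also branches to EndMBB as soon as a difference is found, since later blocks must not be compared once the result is known.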
03276 // Decompose string pseudo-instruction MI into a loop that continually performs
03277 // Opcode until CC != 3.
03278 MachineBasicBlock *
03279 SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
03280                                          MachineBasicBlock *MBB,
03281                                          unsigned Opcode) const {
03282   MachineFunction &MF = *MBB->getParent();
03283   const SystemZInstrInfo *TII =
03284       static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
03285   MachineRegisterInfo &MRI = MF.getRegInfo();
03286   DebugLoc DL = MI->getDebugLoc();
03287 
03288   uint64_t End1Reg   = MI->getOperand(0).getReg();
03289   uint64_t Start1Reg = MI->getOperand(1).getReg();
03290   uint64_t Start2Reg = MI->getOperand(2).getReg();
03291   uint64_t CharReg   = MI->getOperand(3).getReg();
03292 
03293   const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
03294   uint64_t This1Reg = MRI.createVirtualRegister(RC);
03295   uint64_t This2Reg = MRI.createVirtualRegister(RC);
03296   uint64_t End2Reg  = MRI.createVirtualRegister(RC);
03297 
03298   MachineBasicBlock *StartMBB = MBB;
03299   MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
03300   MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
03301 
03302   //  StartMBB:
03303   //   # fall through to LoopMBB
03304   MBB->addSuccessor(LoopMBB);
03305 
03306   //  LoopMBB:
03307   //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
03308   //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
03309   //   R0L = %CharReg
03310   //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
03311   //   JO LoopMBB
03312   //   # fall through to DoneMBB
03313   //
03314   // The load of R0L can be hoisted by post-RA LICM.
03315   MBB = LoopMBB;
03316 
03317   BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
03318     .addReg(Start1Reg).addMBB(StartMBB)
03319     .addReg(End1Reg).addMBB(LoopMBB);
03320   BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
03321     .addReg(Start2Reg).addMBB(StartMBB)
03322     .addReg(End2Reg).addMBB(LoopMBB);
03323   BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
03324   BuildMI(MBB, DL, TII->get(Opcode))
03325     .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
03326     .addReg(This1Reg).addReg(This2Reg);
03327   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
03328     .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
03329   MBB->addSuccessor(LoopMBB);
03330   MBB->addSuccessor(DoneMBB);
03331 
03332   DoneMBB->addLiveIn(SystemZ::CC);
03333 
03334   MI->eraseFromParent();
03335   return DoneMBB;
03336 }
03337 
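The string instructions CLST, MVST and SRST are interruptible: they may stop after a CPU-determined number of bytes and report CC 3, leaving their registers pointing at the resume position, and the JO above simply re-executes the instruction from where it left off. The sketch below shows the same control shape in C++, with a deliberately simplified clstStep standing in for one bounded execution of CLST (clstStep, clstLoop and the exact termination rules here are illustrative assumptions, not ISA-accurate semantics):

#include <cstddef>

// Result of one bounded execution of a CLST-like comparison.
struct StepResult {
  unsigned CC;       // 0: equal, 1: first low, 2: first high, 3: rerun
  const char *Ptr1;  // resume positions, like End1Reg/End2Reg above
  const char *Ptr2;
};

// Hypothetical single CLST execution: stop at inequality, at the
// Terminator character, or after MaxBytes with CC 3 ("interrupted").
static StepResult clstStep(const char *P1, const char *P2, char Terminator,
                           size_t MaxBytes = 256) {
  for (size_t I = 0; I < MaxBytes; ++I, ++P1, ++P2) {
    bool End1 = (*P1 == Terminator), End2 = (*P2 == Terminator);
    if (End1 && End2)
      return {0, P1, P2};
    if (End1 || End2 || *P1 != *P2)
      return {End1 || (!End2 && (unsigned char)*P1 < (unsigned char)*P2)
                  ? 1u : 2u,
              P1, P2};
  }
  return {3, P1, P2};  // stopped early: the wrapper loop runs CLST again
}

// Analogue of the block structure above: JO loops while CC == 3.
static unsigned clstLoop(const char *S1, const char *S2, char Terminator) {
  StepResult R{3, S1, S2};
  do
    R = clstStep(R.Ptr1, R.Ptr2, Terminator);  // R0L = %CharReg; CLST
  while (R.CC == 3);
  return R.CC;
}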
03338 MachineBasicBlock *SystemZTargetLowering::
03339 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
03340   switch (MI->getOpcode()) {
03341   case SystemZ::Select32Mux:
03342   case SystemZ::Select32:
03343   case SystemZ::SelectF32:
03344   case SystemZ::Select64:
03345   case SystemZ::SelectF64:
03346   case SystemZ::SelectF128:
03347     return emitSelect(MI, MBB);
03348 
03349   case SystemZ::CondStore8Mux:
03350     return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
03351   case SystemZ::CondStore8MuxInv:
03352     return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
03353   case SystemZ::CondStore16Mux:
03354     return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
03355   case SystemZ::CondStore16MuxInv:
03356     return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
03357   case SystemZ::CondStore8:
03358     return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
03359   case SystemZ::CondStore8Inv:
03360     return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
03361   case SystemZ::CondStore16:
03362     return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
03363   case SystemZ::CondStore16Inv:
03364     return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
03365   case SystemZ::CondStore32:
03366     return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
03367   case SystemZ::CondStore32Inv:
03368     return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
03369   case SystemZ::CondStore64:
03370     return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
03371   case SystemZ::CondStore64Inv:
03372     return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
03373   case SystemZ::CondStoreF32:
03374     return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
03375   case SystemZ::CondStoreF32Inv:
03376     return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
03377   case SystemZ::CondStoreF64:
03378     return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
03379   case SystemZ::CondStoreF64Inv:
03380     return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
03381 
03382   case SystemZ::AEXT128_64:
03383     return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
03384   case SystemZ::ZEXT128_32:
03385     return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
03386   case SystemZ::ZEXT128_64:
03387     return emitExt128(MI, MBB, true, SystemZ::subreg_l64);
03388 
03389   case SystemZ::ATOMIC_SWAPW:
03390     return emitAtomicLoadBinary(MI, MBB, 0, 0);
03391   case SystemZ::ATOMIC_SWAP_32:
03392     return emitAtomicLoadBinary(MI, MBB, 0, 32);
03393   case SystemZ::ATOMIC_SWAP_64:
03394     return emitAtomicLoadBinary(MI, MBB, 0, 64);
03395 
03396   case SystemZ::ATOMIC_LOADW_AR:
03397     return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
03398   case SystemZ::ATOMIC_LOADW_AFI:
03399     return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
03400   case SystemZ::ATOMIC_LOAD_AR:
03401     return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
03402   case SystemZ::ATOMIC_LOAD_AHI:
03403     return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
03404   case SystemZ::ATOMIC_LOAD_AFI:
03405     return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
03406   case SystemZ::ATOMIC_LOAD_AGR:
03407     return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
03408   case SystemZ::ATOMIC_LOAD_AGHI:
03409     return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
03410   case SystemZ::ATOMIC_LOAD_AGFI:
03411     return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
03412 
03413   case SystemZ::ATOMIC_LOADW_SR:
03414     return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
03415   case SystemZ::ATOMIC_LOAD_SR:
03416     return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
03417   case SystemZ::ATOMIC_LOAD_SGR:
03418     return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
03419 
03420   case SystemZ::ATOMIC_LOADW_NR:
03421     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
03422   case SystemZ::ATOMIC_LOADW_NILH:
03423     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
03424   case SystemZ::ATOMIC_LOAD_NR:
03425     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
03426   case SystemZ::ATOMIC_LOAD_NILL:
03427     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
03428   case SystemZ::ATOMIC_LOAD_NILH:
03429     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
03430   case SystemZ::ATOMIC_LOAD_NILF:
03431     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
03432   case SystemZ::ATOMIC_LOAD_NGR:
03433     return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
03434   case SystemZ::ATOMIC_LOAD_NILL64:
03435     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
03436   case SystemZ::ATOMIC_LOAD_NILH64:
03437     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
03438   case SystemZ::ATOMIC_LOAD_NIHL64:
03439     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
03440   case SystemZ::ATOMIC_LOAD_NIHH64:
03441     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
03442   case SystemZ::ATOMIC_LOAD_NILF64:
03443     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
03444   case SystemZ::ATOMIC_LOAD_NIHF64:
03445     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
03446 
03447   case SystemZ::ATOMIC_LOADW_OR:
03448     return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
03449   case SystemZ::ATOMIC_LOADW_OILH:
03450     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
03451   case SystemZ::ATOMIC_LOAD_OR:
03452     return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
03453   case SystemZ::ATOMIC_LOAD_OILL:
03454     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
03455   case SystemZ::ATOMIC_LOAD_OILH:
03456     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
03457   case SystemZ::ATOMIC_LOAD_OILF:
03458     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
03459   case SystemZ::ATOMIC_LOAD_OGR:
03460     return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
03461   case SystemZ::ATOMIC_LOAD_OILL64:
03462     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
03463   case SystemZ::ATOMIC_LOAD_OILH64:
03464     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
03465   case SystemZ::ATOMIC_LOAD_OIHL64:
03466     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
03467   case SystemZ::ATOMIC_LOAD_OIHH64:
03468     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
03469   case SystemZ::ATOMIC_LOAD_OILF64:
03470     return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
03471   case SystemZ::ATOMIC_LOAD_OIHF64:
03472     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
03473 
03474   case SystemZ::ATOMIC_LOADW_XR:
03475     return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
03476   case SystemZ::ATOMIC_LOADW_XILF:
03477     return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
03478   case SystemZ::ATOMIC_LOAD_XR:
03479     return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
03480   case SystemZ::ATOMIC_LOAD_XILF:
03481     return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
03482   case SystemZ::ATOMIC_LOAD_XGR:
03483     return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
03484   case SystemZ::ATOMIC_LOAD_XILF64:
03485     return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
03486   case SystemZ::ATOMIC_LOAD_XIHF64:
03487     return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
03488 
03489   case SystemZ::ATOMIC_LOADW_NRi:
03490     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
03491   case SystemZ::ATOMIC_LOADW_NILHi:
03492     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
03493   case SystemZ::ATOMIC_LOAD_NRi:
03494     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
03495   case SystemZ::ATOMIC_LOAD_NILLi:
03496     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
03497   case SystemZ::ATOMIC_LOAD_NILHi:
03498     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
03499   case SystemZ::ATOMIC_LOAD_NILFi:
03500     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
03501   case SystemZ::ATOMIC_LOAD_NGRi:
03502     return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
03503   case SystemZ::ATOMIC_LOAD_NILL64i:
03504     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
03505   case SystemZ::ATOMIC_LOAD_NILH64i:
03506     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
03507   case SystemZ::ATOMIC_LOAD_NIHL64i:
03508     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
03509   case SystemZ::ATOMIC_LOAD_NIHH64i:
03510     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
03511   case SystemZ::ATOMIC_LOAD_NILF64i:
03512     return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
03513   case SystemZ::ATOMIC_LOAD_NIHF64i:
03514     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
03515 
03516   case SystemZ::ATOMIC_LOADW_MIN:
03517     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
03518                                 SystemZ::CCMASK_CMP_LE, 0);
03519   case SystemZ::ATOMIC_LOAD_MIN_32:
03520     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
03521                                 SystemZ::CCMASK_CMP_LE, 32);
03522   case SystemZ::ATOMIC_LOAD_MIN_64:
03523     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
03524                                 SystemZ::CCMASK_CMP_LE, 64);
03525 
03526   case SystemZ::ATOMIC_LOADW_MAX:
03527     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
03528                                 SystemZ::CCMASK_CMP_GE, 0);
03529   case SystemZ::ATOMIC_LOAD_MAX_32:
03530     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
03531                                 SystemZ::CCMASK_CMP_GE, 32);
03532   case SystemZ::ATOMIC_LOAD_MAX_64:
03533     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
03534                                 SystemZ::CCMASK_CMP_GE, 64);
03535 
03536   case SystemZ::ATOMIC_LOADW_UMIN:
03537     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
03538                                 SystemZ::CCMASK_CMP_LE, 0);
03539   case SystemZ::ATOMIC_LOAD_UMIN_32:
03540     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
03541                                 SystemZ::CCMASK_CMP_LE, 32);
03542   case SystemZ::ATOMIC_LOAD_UMIN_64:
03543     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
03544                                 SystemZ::CCMASK_CMP_LE, 64);
03545 
03546   case SystemZ::ATOMIC_LOADW_UMAX:
03547     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
03548                                 SystemZ::CCMASK_CMP_GE, 0);
03549   case SystemZ::ATOMIC_LOAD_UMAX_32:
03550     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
03551                                 SystemZ::CCMASK_CMP_GE, 32);
03552   case SystemZ::ATOMIC_LOAD_UMAX_64:
03553     return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
03554                                 SystemZ::CCMASK_CMP_GE, 64);
03555 
03556   case SystemZ::ATOMIC_CMP_SWAPW:
03557     return emitAtomicCmpSwapW(MI, MBB);
03558   case SystemZ::MVCSequence:
03559   case SystemZ::MVCLoop:
03560     return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
03561   case SystemZ::NCSequence:
03562   case SystemZ::NCLoop:
03563     return emitMemMemWrapper(MI, MBB, SystemZ::NC);
03564   case SystemZ::OCSequence:
03565   case SystemZ::OCLoop:
03566     return emitMemMemWrapper(MI, MBB, SystemZ::OC);
03567   case SystemZ::XCSequence:
03568   case SystemZ::XCLoop:
03569     return emitMemMemWrapper(MI, MBB, SystemZ::XC);
03570   case SystemZ::CLCSequence:
03571   case SystemZ::CLCLoop:
03572     return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
03573   case SystemZ::CLSTLoop:
03574     return emitStringWrapper(MI, MBB, SystemZ::CLST);
03575   case SystemZ::MVSTLoop:
03576     return emitStringWrapper(MI, MBB, SystemZ::MVST);
03577   case SystemZ::SRSTLoop:
03578     return emitStringWrapper(MI, MBB, SystemZ::SRST);
03579   default:
03580     llvm_unreachable("Unexpected instr type to insert");
03581   }
03582 }