//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;


//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
                                MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8,4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (unsigned Reg = State.AllocateReg(RegList, 6))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4,4),
                                           LocVT, LocInfo));
  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
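  // Illustrative mapping implied by the checks below (LLVM register names):
  // an i64 at offset 0 is assigned I0, an i64 at offset 8 gets I1, an f64 at
  // offset 8 gets D1, and an f32 at offset 8 gets F3; arguments past the
  // register area keep the stack slot allocated here.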
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, 4);

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
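//
// For example (illustrative): toCallerWindow(SP::I0) yields SP::O0, while
// registers outside %i0-%i7 are returned unchanged.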
static unsigned toCallerWindow(unsigned Reg) {
  assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(),
                             OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction()->hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy()));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::
LowerFormalArguments(SDValue Chain,
                     CallingConv::ID CallConv,
                     bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL,
                     SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
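///
/// Concretely (as reflected in the code below), the first six argument words
/// arrive in %i0-%i5 and any further words live in the caller's frame
/// starting at [%fp+92]: 64 bytes of register save area, 4 bytes for the
/// struct-return slot, and 24 bytes for the six register argument words.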
SDValue SparcTargetLowering::
LowerFormalArguments_32(SDValue Chain,
                        CallingConv::ID CallConv,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        SDLoc dl,
                        SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                MachinePointerInfo(),
                                false, false, false, 0);
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64);
        unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo()->
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                              MachinePointerInfo(),
                              false, false, false, 0);
        } else {
          unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }
        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64);
      // If it is double-word aligned, just load.
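      // Otherwise (below) the f64 straddles two 4-byte argument slots and is
      // reassembled from two separate i32 loads.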
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo()->CreateFixedObject(8,
                                                      Offset,
                                                      true);
        SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
        SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo()->CreateFixedObject(4,
                                                    Offset,
                                                    true);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
      SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                  MachinePointerInfo(),
                                  false, false, false, 0);
      int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
                                                     Offset+4,
                                                     true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, getPointerTy());

      SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
                                  MachinePointerInfo(),
                                  false, false, false, 0);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo()->CreateFixedObject(4,
                                                  Offset,
                                                  true);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                         MachinePointerInfo(),
                         false, false, false, 0);
    } else {
      ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
      // Sparc is big endian, so add an offset based on the ObjectVT.
      unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
      FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
                          DAG.getConstant(Offset, MVT::i32));
      Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
                            MachinePointerInfo(),
                            VA.getValVT(), false, false, false, 0);
      Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction()->hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                          true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
                                       MachinePointerInfo(),
                                       false, false, 0));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

// Lower formal arguments for the 64 bit ABI.
SDValue SparcTargetLowering::
LowerFormalArguments_64(SDValue Chain,
                        CallingConv::ID CallConv,
                        bool IsVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        SDLoc DL,
                        SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      unsigned VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain,
                                 DAG.getFrameIndex(FI, getPointerTy()),
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, false, 0));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getNextStackOffset();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
    OutChains.push_back(DAG.getStore(Chain, DL, VArg,
                                     DAG.getFrameIndex(FI, getPointerTy()),
                                     MachinePointerInfo::getFixedStack(FI),
                                     false, false, 0));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                ImmutableCallSite *CS) {
  if (CS)
    return CS->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

// Lower a call for the 32-bit ABI.
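//
// Roughly: CC_Sparc32 assigns each operand to a register or stack slot,
// byval arguments are copied to local stack objects, and any value assigned
// to an %i register is rewritten with toCallerWindow() so the copy is emitted
// into the corresponding %o register on the caller's side.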
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MFI->CreateStackObject(Size, Align, false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    SDValue SizeNode = DAG.getConstant(Size, MVT::i32);

    Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
                          false,        // isVolatile,
                          (Size <= 32), // AlwaysInline if size <= 32
                          MachinePointerInfo(), MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      Arg = ByValArgs[byvalArgIdx++];

    // Promote the value if needed.
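    // (E.g. a small integer operand is sign-, zero-, or any-extended here to
    // its 32-bit location type before being placed in a register or slot.)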
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());
      // Store SRet argument in %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                         MachinePointerInfo(),
                                         false, false, 0));
      hasStructRetAttr = true;
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
          continue;
        }
      }

      SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
      SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                                   Arg, StackPtr, MachinePointerInfo(),
                                   false, false, 0);
      // Sparc is big-endian, so the high part comes first.
      SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
                               MachinePointerInfo(), false, false, false, 0);
      // Increment the pointer to the other half.
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             DAG.getIntPtrConstant(4));
      // Load the low part.
      SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
                               MachinePointerInfo(), false, false, false, 0);

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
        } else {
          // Store the low part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the high part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
        // Store the low part.
        PtrOff = DAG.getIntPtrConstant(Offset+4);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+StackOffset);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }


  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    unsigned Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
                 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI =
      getTargetMachine().getSubtarget<SparcSubtarget>().getRegisterInfo();
  const uint32_t *Mask = ((hasReturnsTwice)
                          ? TRI->getRTCallPreservedMask(CallConv)
                          : TRI->getCallPreservedMask(CallConv));
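  // (Calls to returns_twice functions such as setjmp use a different
  // preserved-register mask, as selected above.)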
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

// This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  static const char *const ABICalls[] =
    { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
      "_Q_sqrt", "_Q_neg",
      "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
      "_Q_lltoq", "_Q_ulltoq",
      nullptr
    };
  for (const char * const *I = ABICalls; *I != nullptr; ++I)
    if (strcmp(CalleeName, *I) == 0)
      return true;
  return false;
}

unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
    if (!CalleeFn && isFP128ABICall(CalleeName))
      return 16; // Return sizeof(fp128)
  }

  if (!CalleeFn)
    return 0;

  assert(CalleeFn->hasStructRetAttr() &&
         "Callee does not have the StructRet attribute.");

  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return getDataLayout()->getTypeAllocSize(ElementTy);
}


// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize  = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = RoundUpToAlignment(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                               DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
                               HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
                               LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
                                     MachinePointerInfo(),
                                     false, false, 0);
        // Load into Reg and Reg+1
        SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
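    // (BIAS is the V9 stack pointer bias, 2047 on 64-bit SPARC, obtained from
    // Subtarget->getStackPointerBias() below.)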
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128);
    PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
                 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
                                        TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy(), TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI =
      getTargetMachine().getSubtarget<SparcSubtarget>().getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // previous CopyFromReg node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
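///
/// For example, both SETEQ and SETOEQ map to FCC_E below, while the
/// unordered-only predicates map to the FCC_U* codes.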
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, new SparcELFTargetObjectFile()) {
  Subtarget = &TM.getSubtarget<SparcSubtarget>();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);

  // Turn FP extload into load/fextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  // Sparc doesn't have i1 sign extending load
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // Custom legalize GlobalAddress nodes into LO/HI parts.
  setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom);
  setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom);
  setOperationAction(ISD::ConstantPool, getPointerTy(), Custom);
  setOperationAction(ISD::BlockAddress, getPointerTy(), Custom);

  // Sparc doesn't have sext_inreg, replace them with shl/sra
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);

  // Sparc has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UREM, MVT::i64, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  }

  // Custom expand fp<->sint
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Custom Expand fp<->uint
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);

  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Expand);

  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ADDC, MVT::i64, Custom);
    setOperationAction(ISD::ADDE, MVT::i64, Custom);
    setOperationAction(ISD::SUBC, MVT::i64, Custom);
    setOperationAction(ISD::SUBE, MVT::i64, Custom);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

    setOperationAction(ISD::CTPOP, MVT::i64,
                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ , MVT::i64, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ , MVT::i64, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::ROTL , MVT::i64, Expand);
    setOperationAction(ISD::ROTR , MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }

  // ATOMICs.
  // FIXME: We insert fences for each atomics and generate sub-optimal code
  // for PSO/TSO. Also, implement other atomicrmw operations.
01484 01485 setInsertFencesForAtomic(true); 01486 01487 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal); 01488 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, 01489 (Subtarget->isV9() ? Legal: Expand)); 01490 01491 01492 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); 01493 01494 // Custom Lower Atomic LOAD/STORE 01495 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 01496 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 01497 01498 if (Subtarget->is64Bit()) { 01499 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); 01500 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal); 01501 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 01502 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); 01503 } 01504 01505 if (!Subtarget->isV9()) { 01506 // SparcV8 does not have FNEGD and FABSD. 01507 setOperationAction(ISD::FNEG, MVT::f64, Custom); 01508 setOperationAction(ISD::FABS, MVT::f64, Custom); 01509 } 01510 01511 setOperationAction(ISD::FSIN , MVT::f128, Expand); 01512 setOperationAction(ISD::FCOS , MVT::f128, Expand); 01513 setOperationAction(ISD::FSINCOS, MVT::f128, Expand); 01514 setOperationAction(ISD::FREM , MVT::f128, Expand); 01515 setOperationAction(ISD::FMA , MVT::f128, Expand); 01516 setOperationAction(ISD::FSIN , MVT::f64, Expand); 01517 setOperationAction(ISD::FCOS , MVT::f64, Expand); 01518 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 01519 setOperationAction(ISD::FREM , MVT::f64, Expand); 01520 setOperationAction(ISD::FMA , MVT::f64, Expand); 01521 setOperationAction(ISD::FSIN , MVT::f32, Expand); 01522 setOperationAction(ISD::FCOS , MVT::f32, Expand); 01523 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 01524 setOperationAction(ISD::FREM , MVT::f32, Expand); 01525 setOperationAction(ISD::FMA , MVT::f32, Expand); 01526 setOperationAction(ISD::CTTZ , MVT::i32, Expand); 01527 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 01528 setOperationAction(ISD::CTLZ , MVT::i32, Expand); 01529 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 01530 setOperationAction(ISD::ROTL , MVT::i32, Expand); 01531 setOperationAction(ISD::ROTR , MVT::i32, Expand); 01532 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 01533 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); 01534 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 01535 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 01536 setOperationAction(ISD::FPOW , MVT::f128, Expand); 01537 setOperationAction(ISD::FPOW , MVT::f64, Expand); 01538 setOperationAction(ISD::FPOW , MVT::f32, Expand); 01539 01540 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 01541 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 01542 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 01543 01544 // FIXME: Sparc provides these multiplies, but we don't have them yet. 
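  // Architecture note for the FIXME above (background, not modelled here):
  // V8 `umul`/`smul` compute a full 32x32->64 product and leave the high word
  // in the %y register, and V9 `mulx` yields the low 64 bits of a 64x64
  // product. Until those are wired up, marking the *MUL_LOHI nodes Expand
  // lets the legalizer fall back to a generic lowering (separate high/low
  // multiply nodes or a runtime library call) instead.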
01545 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 01546 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 01547 01548 if (Subtarget->is64Bit()) { 01549 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 01550 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 01551 setOperationAction(ISD::MULHU, MVT::i64, Expand); 01552 setOperationAction(ISD::MULHS, MVT::i64, Expand); 01553 01554 setOperationAction(ISD::UMULO, MVT::i64, Custom); 01555 setOperationAction(ISD::SMULO, MVT::i64, Custom); 01556 01557 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 01558 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 01559 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); 01560 } 01561 01562 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 01563 setOperationAction(ISD::VASTART , MVT::Other, Custom); 01564 // VAARG needs to be lowered to not do unaligned accesses for doubles. 01565 setOperationAction(ISD::VAARG , MVT::Other, Custom); 01566 01567 setOperationAction(ISD::TRAP , MVT::Other, Legal); 01568 01569 // Use the default implementation. 01570 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 01571 setOperationAction(ISD::VAEND , MVT::Other, Expand); 01572 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 01573 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 01574 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 01575 01576 setExceptionPointerRegister(SP::I0); 01577 setExceptionSelectorRegister(SP::I1); 01578 01579 setStackPointerRegisterToSaveRestore(SP::O6); 01580 01581 setOperationAction(ISD::CTPOP, MVT::i32, 01582 Subtarget->usePopc() ? Legal : Expand); 01583 01584 if (Subtarget->isV9() && Subtarget->hasHardQuad()) { 01585 setOperationAction(ISD::LOAD, MVT::f128, Legal); 01586 setOperationAction(ISD::STORE, MVT::f128, Legal); 01587 } else { 01588 setOperationAction(ISD::LOAD, MVT::f128, Custom); 01589 setOperationAction(ISD::STORE, MVT::f128, Custom); 01590 } 01591 01592 if (Subtarget->hasHardQuad()) { 01593 setOperationAction(ISD::FADD, MVT::f128, Legal); 01594 setOperationAction(ISD::FSUB, MVT::f128, Legal); 01595 setOperationAction(ISD::FMUL, MVT::f128, Legal); 01596 setOperationAction(ISD::FDIV, MVT::f128, Legal); 01597 setOperationAction(ISD::FSQRT, MVT::f128, Legal); 01598 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); 01599 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); 01600 if (Subtarget->isV9()) { 01601 setOperationAction(ISD::FNEG, MVT::f128, Legal); 01602 setOperationAction(ISD::FABS, MVT::f128, Legal); 01603 } else { 01604 setOperationAction(ISD::FNEG, MVT::f128, Custom); 01605 setOperationAction(ISD::FABS, MVT::f128, Custom); 01606 } 01607 01608 if (!Subtarget->is64Bit()) { 01609 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 01610 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 01611 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 01612 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 01613 } 01614 01615 } else { 01616 // Custom legalize f128 operations. 
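    // For example, without hardware quad support an f128 add such as
    //
    //   %sum = fadd fp128 %a, %b
    //
    // is never selected to an instruction; LowerF128Op() rewrites it into a
    // call to the runtime routine named below (_Qp_add on 64-bit, _Q_add on
    // 32-bit), with the fp128 operands passed indirectly through the stack.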
01617 01618 setOperationAction(ISD::FADD, MVT::f128, Custom); 01619 setOperationAction(ISD::FSUB, MVT::f128, Custom); 01620 setOperationAction(ISD::FMUL, MVT::f128, Custom); 01621 setOperationAction(ISD::FDIV, MVT::f128, Custom); 01622 setOperationAction(ISD::FSQRT, MVT::f128, Custom); 01623 setOperationAction(ISD::FNEG, MVT::f128, Custom); 01624 setOperationAction(ISD::FABS, MVT::f128, Custom); 01625 01626 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); 01627 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); 01628 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 01629 01630 // Setup Runtime library names. 01631 if (Subtarget->is64Bit()) { 01632 setLibcallName(RTLIB::ADD_F128, "_Qp_add"); 01633 setLibcallName(RTLIB::SUB_F128, "_Qp_sub"); 01634 setLibcallName(RTLIB::MUL_F128, "_Qp_mul"); 01635 setLibcallName(RTLIB::DIV_F128, "_Qp_div"); 01636 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt"); 01637 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi"); 01638 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui"); 01639 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq"); 01640 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq"); 01641 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox"); 01642 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux"); 01643 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq"); 01644 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq"); 01645 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq"); 01646 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq"); 01647 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos"); 01648 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod"); 01649 } else { 01650 setLibcallName(RTLIB::ADD_F128, "_Q_add"); 01651 setLibcallName(RTLIB::SUB_F128, "_Q_sub"); 01652 setLibcallName(RTLIB::MUL_F128, "_Q_mul"); 01653 setLibcallName(RTLIB::DIV_F128, "_Q_div"); 01654 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt"); 01655 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi"); 01656 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou"); 01657 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq"); 01658 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq"); 01659 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 01660 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 01661 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 01662 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 01663 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq"); 01664 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq"); 01665 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos"); 01666 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod"); 01667 } 01668 } 01669 01670 setMinFunctionAlignment(2); 01671 01672 computeRegisterProperties(); 01673 } 01674 01675 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { 01676 switch (Opcode) { 01677 default: return nullptr; 01678 case SPISD::CMPICC: return "SPISD::CMPICC"; 01679 case SPISD::CMPFCC: return "SPISD::CMPFCC"; 01680 case SPISD::BRICC: return "SPISD::BRICC"; 01681 case SPISD::BRXCC: return "SPISD::BRXCC"; 01682 case SPISD::BRFCC: return "SPISD::BRFCC"; 01683 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC"; 01684 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC"; 01685 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC"; 01686 case SPISD::Hi: return "SPISD::Hi"; 01687 case SPISD::Lo: return "SPISD::Lo"; 01688 case SPISD::FTOI: return "SPISD::FTOI"; 01689 case SPISD::ITOF: return "SPISD::ITOF"; 01690 case SPISD::FTOX: return "SPISD::FTOX"; 01691 case SPISD::XTOF: return "SPISD::XTOF"; 01692 case 
SPISD::CALL:          return "SPISD::CALL";
01693   case SPISD::RET_FLAG:        return "SPISD::RET_FLAG";
01694   case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
01695   case SPISD::FLUSHW:          return "SPISD::FLUSHW";
01696   case SPISD::TLS_ADD:         return "SPISD::TLS_ADD";
01697   case SPISD::TLS_LD:          return "SPISD::TLS_LD";
01698   case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
01699   }
01700 }
01701 
01702 EVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
01703   if (!VT.isVector())
01704     return MVT::i32;
01705   return VT.changeVectorElementTypeToInteger();
01706 }
01707 
01708 /// computeKnownBitsForTargetNode - Determine which bits of 'Op' are known to
01709 /// be zero or one. Op is expected to be a target-specific node. Used by the
01710 /// DAG combiner.
01711 void SparcTargetLowering::computeKnownBitsForTargetNode
01712                                 (const SDValue Op,
01713                                  APInt &KnownZero,
01714                                  APInt &KnownOne,
01715                                  const SelectionDAG &DAG,
01716                                  unsigned Depth) const {
01717   APInt KnownZero2, KnownOne2;
01718   KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
01719 
01720   switch (Op.getOpcode()) {
01721   default: break;
01722   case SPISD::SELECT_ICC:
01723   case SPISD::SELECT_XCC:
01724   case SPISD::SELECT_FCC:
01725     DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
01726     DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
01727 
01728     // Only known if known in both the LHS and RHS.
01729     KnownOne &= KnownOne2;
01730     KnownZero &= KnownZero2;
01731     break;
01732   }
01733 }
01734 
01735 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
01736 // set LHS/RHS to the original compared values and SPCC to the condition code.
01737 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
01738                              ISD::CondCode CC, unsigned &SPCC) {
01739   if (isa<ConstantSDNode>(RHS) &&
01740       cast<ConstantSDNode>(RHS)->isNullValue() &&
01741       CC == ISD::SETNE &&
01742       (((LHS.getOpcode() == SPISD::SELECT_ICC ||
01743          LHS.getOpcode() == SPISD::SELECT_XCC) &&
01744         LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
01745        (LHS.getOpcode() == SPISD::SELECT_FCC &&
01746         LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
01747       isa<ConstantSDNode>(LHS.getOperand(0)) &&
01748       isa<ConstantSDNode>(LHS.getOperand(1)) &&
01749       cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
01750       cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
01751     SDValue CMPCC = LHS.getOperand(3);
01752     SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
01753     LHS = CMPCC.getOperand(0);
01754     RHS = CMPCC.getOperand(1);
01755   }
01756 }
01757 
01758 // Convert to a target node and set target flags.
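// For example (illustrative): wrapping a GlobalAddress in the VK_Sparc_HI and
// VK_Sparc_LO target flags, as makeHiLoPair() does below for the abs32 code
// model, is what eventually prints as the classic absolute-address sequence
//
//   sethi %hi(sym), %reg
//   add   %reg, %lo(sym), %reg
//
// The flag only affects how the symbol operand is printed and relocated; the
// node itself remains an ordinary target global address / constant pool /
// block address / external symbol node.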
01759 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF, 01760 SelectionDAG &DAG) const { 01761 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) 01762 return DAG.getTargetGlobalAddress(GA->getGlobal(), 01763 SDLoc(GA), 01764 GA->getValueType(0), 01765 GA->getOffset(), TF); 01766 01767 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) 01768 return DAG.getTargetConstantPool(CP->getConstVal(), 01769 CP->getValueType(0), 01770 CP->getAlignment(), 01771 CP->getOffset(), TF); 01772 01773 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) 01774 return DAG.getTargetBlockAddress(BA->getBlockAddress(), 01775 Op.getValueType(), 01776 0, 01777 TF); 01778 01779 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) 01780 return DAG.getTargetExternalSymbol(ES->getSymbol(), 01781 ES->getValueType(0), TF); 01782 01783 llvm_unreachable("Unhandled address SDNode"); 01784 } 01785 01786 // Split Op into high and low parts according to HiTF and LoTF. 01787 // Return an ADD node combining the parts. 01788 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op, 01789 unsigned HiTF, unsigned LoTF, 01790 SelectionDAG &DAG) const { 01791 SDLoc DL(Op); 01792 EVT VT = Op.getValueType(); 01793 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG)); 01794 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG)); 01795 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 01796 } 01797 01798 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool, 01799 // or ExternalSymbol SDNode. 01800 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { 01801 SDLoc DL(Op); 01802 EVT VT = getPointerTy(); 01803 01804 // Handle PIC mode first. 01805 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 01806 // This is the pic32 code model, the GOT is known to be smaller than 4GB. 01807 SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22, 01808 SparcMCExpr::VK_Sparc_GOT10, DAG); 01809 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); 01810 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); 01811 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 01812 // function has calls. 01813 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 01814 MFI->setHasCalls(true); 01815 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, 01816 MachinePointerInfo::getGOT(), false, false, false, 0); 01817 } 01818 01819 // This is one of the absolute code models. 01820 switch(getTargetMachine().getCodeModel()) { 01821 default: 01822 llvm_unreachable("Unsupported absolute code model"); 01823 case CodeModel::Small: 01824 // abs32. 01825 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 01826 SparcMCExpr::VK_Sparc_LO, DAG); 01827 case CodeModel::Medium: { 01828 // abs44. 01829 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44, 01830 SparcMCExpr::VK_Sparc_M44, DAG); 01831 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32)); 01832 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG); 01833 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); 01834 return DAG.getNode(ISD::ADD, DL, VT, H44, L44); 01835 } 01836 case CodeModel::Large: { 01837 // abs64. 
01838 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH, 01839 SparcMCExpr::VK_Sparc_HM, DAG); 01840 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32)); 01841 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 01842 SparcMCExpr::VK_Sparc_LO, DAG); 01843 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 01844 } 01845 } 01846 } 01847 01848 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, 01849 SelectionDAG &DAG) const { 01850 return makeAddress(Op, DAG); 01851 } 01852 01853 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op, 01854 SelectionDAG &DAG) const { 01855 return makeAddress(Op, DAG); 01856 } 01857 01858 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op, 01859 SelectionDAG &DAG) const { 01860 return makeAddress(Op, DAG); 01861 } 01862 01863 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, 01864 SelectionDAG &DAG) const { 01865 01866 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 01867 SDLoc DL(GA); 01868 const GlobalValue *GV = GA->getGlobal(); 01869 EVT PtrVT = getPointerTy(); 01870 01871 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 01872 01873 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { 01874 unsigned HiTF = ((model == TLSModel::GeneralDynamic) 01875 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22 01876 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22); 01877 unsigned LoTF = ((model == TLSModel::GeneralDynamic) 01878 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10 01879 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10); 01880 unsigned addTF = ((model == TLSModel::GeneralDynamic) 01881 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD 01882 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD); 01883 unsigned callTF = ((model == TLSModel::GeneralDynamic) 01884 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL 01885 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL); 01886 01887 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); 01888 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 01889 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo, 01890 withTargetFlags(Op, addTF, DAG)); 01891 01892 SDValue Chain = DAG.getEntryNode(); 01893 SDValue InFlag; 01894 01895 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, true), DL); 01896 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag); 01897 InFlag = Chain.getValue(1); 01898 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT); 01899 SDValue Symbol = withTargetFlags(Op, callTF, DAG); 01900 01901 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 01902 SmallVector<SDValue, 4> Ops; 01903 Ops.push_back(Chain); 01904 Ops.push_back(Callee); 01905 Ops.push_back(Symbol); 01906 Ops.push_back(DAG.getRegister(SP::O0, PtrVT)); 01907 const uint32_t *Mask = getTargetMachine() 01908 .getSubtargetImpl() 01909 ->getRegisterInfo() 01910 ->getCallPreservedMask(CallingConv::C); 01911 assert(Mask && "Missing call preserved mask for calling convention"); 01912 Ops.push_back(DAG.getRegisterMask(Mask)); 01913 Ops.push_back(InFlag); 01914 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops); 01915 InFlag = Chain.getValue(1); 01916 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true), 01917 DAG.getIntPtrConstant(0, true), InFlag, DL); 01918 InFlag = Chain.getValue(1); 01919 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag); 01920 01921 if (model != TLSModel::LocalDynamic) 01922 return Ret; 01923 01924 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 01925 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG)); 01926 SDValue Lo = 
DAG.getNode(SPISD::Lo, DL, PtrVT, 01927 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG)); 01928 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 01929 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, 01930 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG)); 01931 } 01932 01933 if (model == TLSModel::InitialExec) { 01934 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX 01935 : SparcMCExpr::VK_Sparc_TLS_IE_LD); 01936 01937 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 01938 01939 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 01940 // function has calls. 01941 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 01942 MFI->setHasCalls(true); 01943 01944 SDValue TGA = makeHiLoPair(Op, 01945 SparcMCExpr::VK_Sparc_TLS_IE_HI22, 01946 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG); 01947 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA); 01948 SDValue Offset = DAG.getNode(SPISD::TLS_LD, 01949 DL, PtrVT, Ptr, 01950 withTargetFlags(Op, ldTF, DAG)); 01951 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, 01952 DAG.getRegister(SP::G7, PtrVT), Offset, 01953 withTargetFlags(Op, 01954 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG)); 01955 } 01956 01957 assert(model == TLSModel::LocalExec); 01958 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 01959 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG)); 01960 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, 01961 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG)); 01962 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 01963 01964 return DAG.getNode(ISD::ADD, DL, PtrVT, 01965 DAG.getRegister(SP::G7, PtrVT), Offset); 01966 } 01967 01968 SDValue 01969 SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, 01970 SDValue Arg, SDLoc DL, 01971 SelectionDAG &DAG) const { 01972 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 01973 EVT ArgVT = Arg.getValueType(); 01974 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 01975 01976 ArgListEntry Entry; 01977 Entry.Node = Arg; 01978 Entry.Ty = ArgTy; 01979 01980 if (ArgTy->isFP128Ty()) { 01981 // Create a stack object and pass the pointer to the library function. 01982 int FI = MFI->CreateStackObject(16, 8, false); 01983 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy()); 01984 Chain = DAG.getStore(Chain, 01985 DL, 01986 Entry.Node, 01987 FIPtr, 01988 MachinePointerInfo(), 01989 false, 01990 false, 01991 8); 01992 01993 Entry.Node = FIPtr; 01994 Entry.Ty = PointerType::getUnqual(ArgTy); 01995 } 01996 Args.push_back(Entry); 01997 return Chain; 01998 } 01999 02000 SDValue 02001 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG, 02002 const char *LibFuncName, 02003 unsigned numArgs) const { 02004 02005 ArgListTy Args; 02006 02007 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 02008 02009 SDValue Callee = DAG.getExternalSymbol(LibFuncName, getPointerTy()); 02010 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); 02011 Type *RetTyABI = RetTy; 02012 SDValue Chain = DAG.getEntryNode(); 02013 SDValue RetPtr; 02014 02015 if (RetTy->isFP128Ty()) { 02016 // Create a Stack Object to receive the return value of type f128. 
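    // (Sketch of the convention used here: f128 results are returned
    // indirectly, so we allocate a 16-byte, 8-byte-aligned stack slot, pass
    // its address as an extra argument -- marked sret on 32-bit -- pretend
    // the call returns void, and load the f128 result back from the slot
    // after the call.)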
02017 ArgListEntry Entry; 02018 int RetFI = MFI->CreateStackObject(16, 8, false); 02019 RetPtr = DAG.getFrameIndex(RetFI, getPointerTy()); 02020 Entry.Node = RetPtr; 02021 Entry.Ty = PointerType::getUnqual(RetTy); 02022 if (!Subtarget->is64Bit()) 02023 Entry.isSRet = true; 02024 Entry.isReturned = false; 02025 Args.push_back(Entry); 02026 RetTyABI = Type::getVoidTy(*DAG.getContext()); 02027 } 02028 02029 assert(Op->getNumOperands() >= numArgs && "Not enough operands!"); 02030 for (unsigned i = 0, e = numArgs; i != e; ++i) { 02031 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG); 02032 } 02033 TargetLowering::CallLoweringInfo CLI(DAG); 02034 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain) 02035 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0); 02036 02037 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 02038 02039 // chain is in second result. 02040 if (RetTyABI == RetTy) 02041 return CallInfo.first; 02042 02043 assert (RetTy->isFP128Ty() && "Unexpected return type!"); 02044 02045 Chain = CallInfo.second; 02046 02047 // Load RetPtr to get the return value. 02048 return DAG.getLoad(Op.getValueType(), 02049 SDLoc(Op), 02050 Chain, 02051 RetPtr, 02052 MachinePointerInfo(), 02053 false, false, false, 8); 02054 } 02055 02056 SDValue 02057 SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS, 02058 unsigned &SPCC, 02059 SDLoc DL, 02060 SelectionDAG &DAG) const { 02061 02062 const char *LibCall = nullptr; 02063 bool is64Bit = Subtarget->is64Bit(); 02064 switch(SPCC) { 02065 default: llvm_unreachable("Unhandled conditional code!"); 02066 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break; 02067 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break; 02068 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break; 02069 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break; 02070 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break; 02071 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break; 02072 case SPCC::FCC_UL : 02073 case SPCC::FCC_ULE: 02074 case SPCC::FCC_UG : 02075 case SPCC::FCC_UGE: 02076 case SPCC::FCC_U : 02077 case SPCC::FCC_O : 02078 case SPCC::FCC_LG : 02079 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break; 02080 } 02081 02082 SDValue Callee = DAG.getExternalSymbol(LibCall, getPointerTy()); 02083 Type *RetTy = Type::getInt32Ty(*DAG.getContext()); 02084 ArgListTy Args; 02085 SDValue Chain = DAG.getEntryNode(); 02086 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG); 02087 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG); 02088 02089 TargetLowering::CallLoweringInfo CLI(DAG); 02090 CLI.setDebugLoc(DL).setChain(Chain) 02091 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0); 02092 02093 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 02094 02095 // result is in first, and chain is in second result. 
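  // The comparison libcalls follow the SunOS convention of returning a small
  // integer that classifies the relation (roughly: 0 = equal, 1 = less,
  // 2 = greater, 3 = unordered); the switch below turns that integer back
  // into an integer-condition-code test for the requested predicate.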
02096 SDValue Result = CallInfo.first; 02097 02098 switch(SPCC) { 02099 default: { 02100 SDValue RHS = DAG.getTargetConstant(0, Result.getValueType()); 02101 SPCC = SPCC::ICC_NE; 02102 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02103 } 02104 case SPCC::FCC_UL : { 02105 SDValue Mask = DAG.getTargetConstant(1, Result.getValueType()); 02106 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 02107 SDValue RHS = DAG.getTargetConstant(0, Result.getValueType()); 02108 SPCC = SPCC::ICC_NE; 02109 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02110 } 02111 case SPCC::FCC_ULE: { 02112 SDValue RHS = DAG.getTargetConstant(2, Result.getValueType()); 02113 SPCC = SPCC::ICC_NE; 02114 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02115 } 02116 case SPCC::FCC_UG : { 02117 SDValue RHS = DAG.getTargetConstant(1, Result.getValueType()); 02118 SPCC = SPCC::ICC_G; 02119 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02120 } 02121 case SPCC::FCC_UGE: { 02122 SDValue RHS = DAG.getTargetConstant(1, Result.getValueType()); 02123 SPCC = SPCC::ICC_NE; 02124 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02125 } 02126 02127 case SPCC::FCC_U : { 02128 SDValue RHS = DAG.getTargetConstant(3, Result.getValueType()); 02129 SPCC = SPCC::ICC_E; 02130 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02131 } 02132 case SPCC::FCC_O : { 02133 SDValue RHS = DAG.getTargetConstant(3, Result.getValueType()); 02134 SPCC = SPCC::ICC_NE; 02135 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02136 } 02137 case SPCC::FCC_LG : { 02138 SDValue Mask = DAG.getTargetConstant(3, Result.getValueType()); 02139 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 02140 SDValue RHS = DAG.getTargetConstant(0, Result.getValueType()); 02141 SPCC = SPCC::ICC_NE; 02142 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02143 } 02144 case SPCC::FCC_UE : { 02145 SDValue Mask = DAG.getTargetConstant(3, Result.getValueType()); 02146 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 02147 SDValue RHS = DAG.getTargetConstant(0, Result.getValueType()); 02148 SPCC = SPCC::ICC_E; 02149 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 02150 } 02151 } 02152 } 02153 02154 static SDValue 02155 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, 02156 const SparcTargetLowering &TLI) { 02157 02158 if (Op.getOperand(0).getValueType() == MVT::f64) 02159 return TLI.LowerF128Op(Op, DAG, 02160 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1); 02161 02162 if (Op.getOperand(0).getValueType() == MVT::f32) 02163 return TLI.LowerF128Op(Op, DAG, 02164 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1); 02165 02166 llvm_unreachable("fpextend with non-float operand!"); 02167 return SDValue(); 02168 } 02169 02170 static SDValue 02171 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, 02172 const SparcTargetLowering &TLI) { 02173 // FP_ROUND on f64 and f32 are legal. 
02174 if (Op.getOperand(0).getValueType() != MVT::f128) 02175 return Op; 02176 02177 if (Op.getValueType() == MVT::f64) 02178 return TLI.LowerF128Op(Op, DAG, 02179 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1); 02180 if (Op.getValueType() == MVT::f32) 02181 return TLI.LowerF128Op(Op, DAG, 02182 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1); 02183 02184 llvm_unreachable("fpround to non-float!"); 02185 return SDValue(); 02186 } 02187 02188 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, 02189 const SparcTargetLowering &TLI, 02190 bool hasHardQuad) { 02191 SDLoc dl(Op); 02192 EVT VT = Op.getValueType(); 02193 assert(VT == MVT::i32 || VT == MVT::i64); 02194 02195 // Expand f128 operations to fp128 abi calls. 02196 if (Op.getOperand(0).getValueType() == MVT::f128 02197 && (!hasHardQuad || !TLI.isTypeLegal(VT))) { 02198 const char *libName = TLI.getLibcallName(VT == MVT::i32 02199 ? RTLIB::FPTOSINT_F128_I32 02200 : RTLIB::FPTOSINT_F128_I64); 02201 return TLI.LowerF128Op(Op, DAG, libName, 1); 02202 } 02203 02204 // Expand if the resulting type is illegal. 02205 if (!TLI.isTypeLegal(VT)) 02206 return SDValue(); 02207 02208 // Otherwise, Convert the fp value to integer in an FP register. 02209 if (VT == MVT::i32) 02210 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0)); 02211 else 02212 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0)); 02213 02214 return DAG.getNode(ISD::BITCAST, dl, VT, Op); 02215 } 02216 02217 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, 02218 const SparcTargetLowering &TLI, 02219 bool hasHardQuad) { 02220 SDLoc dl(Op); 02221 EVT OpVT = Op.getOperand(0).getValueType(); 02222 assert(OpVT == MVT::i32 || (OpVT == MVT::i64)); 02223 02224 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64; 02225 02226 // Expand f128 operations to fp128 ABI calls. 02227 if (Op.getValueType() == MVT::f128 02228 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) { 02229 const char *libName = TLI.getLibcallName(OpVT == MVT::i32 02230 ? RTLIB::SINTTOFP_I32_F128 02231 : RTLIB::SINTTOFP_I64_F128); 02232 return TLI.LowerF128Op(Op, DAG, libName, 1); 02233 } 02234 02235 // Expand if the operand type is illegal. 02236 if (!TLI.isTypeLegal(OpVT)) 02237 return SDValue(); 02238 02239 // Otherwise, Convert the int value to FP in an FP register. 02240 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0)); 02241 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF; 02242 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp); 02243 } 02244 02245 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, 02246 const SparcTargetLowering &TLI, 02247 bool hasHardQuad) { 02248 SDLoc dl(Op); 02249 EVT VT = Op.getValueType(); 02250 02251 // Expand if it does not involve f128 or the target has support for 02252 // quad floating point instructions and the resulting type is legal. 02253 if (Op.getOperand(0).getValueType() != MVT::f128 || 02254 (hasHardQuad && TLI.isTypeLegal(VT))) 02255 return SDValue(); 02256 02257 assert(VT == MVT::i32 || VT == MVT::i64); 02258 02259 return TLI.LowerF128Op(Op, DAG, 02260 TLI.getLibcallName(VT == MVT::i32 02261 ? 
RTLIB::FPTOUINT_F128_I32 02262 : RTLIB::FPTOUINT_F128_I64), 02263 1); 02264 } 02265 02266 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, 02267 const SparcTargetLowering &TLI, 02268 bool hasHardQuad) { 02269 SDLoc dl(Op); 02270 EVT OpVT = Op.getOperand(0).getValueType(); 02271 assert(OpVT == MVT::i32 || OpVT == MVT::i64); 02272 02273 // Expand if it does not involve f128 or the target has support for 02274 // quad floating point instructions and the operand type is legal. 02275 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT))) 02276 return SDValue(); 02277 02278 return TLI.LowerF128Op(Op, DAG, 02279 TLI.getLibcallName(OpVT == MVT::i32 02280 ? RTLIB::UINTTOFP_I32_F128 02281 : RTLIB::UINTTOFP_I64_F128), 02282 1); 02283 } 02284 02285 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, 02286 const SparcTargetLowering &TLI, 02287 bool hasHardQuad) { 02288 SDValue Chain = Op.getOperand(0); 02289 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 02290 SDValue LHS = Op.getOperand(2); 02291 SDValue RHS = Op.getOperand(3); 02292 SDValue Dest = Op.getOperand(4); 02293 SDLoc dl(Op); 02294 unsigned Opc, SPCC = ~0U; 02295 02296 // If this is a br_cc of a "setcc", and if the setcc got lowered into 02297 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values. 02298 LookThroughSetCC(LHS, RHS, CC, SPCC); 02299 02300 // Get the condition flag. 02301 SDValue CompareFlag; 02302 if (LHS.getValueType().isInteger()) { 02303 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS); 02304 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); 02305 // 32-bit compares use the icc flags, 64-bit uses the xcc flags. 02306 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC; 02307 } else { 02308 if (!hasHardQuad && LHS.getValueType() == MVT::f128) { 02309 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 02310 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG); 02311 Opc = SPISD::BRICC; 02312 } else { 02313 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); 02314 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 02315 Opc = SPISD::BRFCC; 02316 } 02317 } 02318 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest, 02319 DAG.getConstant(SPCC, MVT::i32), CompareFlag); 02320 } 02321 02322 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, 02323 const SparcTargetLowering &TLI, 02324 bool hasHardQuad) { 02325 SDValue LHS = Op.getOperand(0); 02326 SDValue RHS = Op.getOperand(1); 02327 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 02328 SDValue TrueVal = Op.getOperand(2); 02329 SDValue FalseVal = Op.getOperand(3); 02330 SDLoc dl(Op); 02331 unsigned Opc, SPCC = ~0U; 02332 02333 // If this is a select_cc of a "setcc", and if the setcc got lowered into 02334 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values. 02335 LookThroughSetCC(LHS, RHS, CC, SPCC); 02336 02337 SDValue CompareFlag; 02338 if (LHS.getValueType().isInteger()) { 02339 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS); 02340 Opc = LHS.getValueType() == MVT::i32 ? 
02341 SPISD::SELECT_ICC : SPISD::SELECT_XCC; 02342 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); 02343 } else { 02344 if (!hasHardQuad && LHS.getValueType() == MVT::f128) { 02345 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 02346 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG); 02347 Opc = SPISD::SELECT_ICC; 02348 } else { 02349 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); 02350 Opc = SPISD::SELECT_FCC; 02351 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 02352 } 02353 } 02354 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal, 02355 DAG.getConstant(SPCC, MVT::i32), CompareFlag); 02356 } 02357 02358 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, 02359 const SparcTargetLowering &TLI) { 02360 MachineFunction &MF = DAG.getMachineFunction(); 02361 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 02362 02363 // Need frame address to find the address of VarArgsFrameIndex. 02364 MF.getFrameInfo()->setFrameAddressIsTaken(true); 02365 02366 // vastart just stores the address of the VarArgsFrameIndex slot into the 02367 // memory location argument. 02368 SDLoc DL(Op); 02369 SDValue Offset = 02370 DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), 02371 DAG.getRegister(SP::I6, TLI.getPointerTy()), 02372 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset())); 02373 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 02374 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1), 02375 MachinePointerInfo(SV), false, false, 0); 02376 } 02377 02378 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { 02379 SDNode *Node = Op.getNode(); 02380 EVT VT = Node->getValueType(0); 02381 SDValue InChain = Node->getOperand(0); 02382 SDValue VAListPtr = Node->getOperand(1); 02383 EVT PtrVT = VAListPtr.getValueType(); 02384 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 02385 SDLoc DL(Node); 02386 SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr, 02387 MachinePointerInfo(SV), false, false, false, 0); 02388 // Increment the pointer, VAList, to the next vaarg. 02389 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, 02390 DAG.getIntPtrConstant(VT.getSizeInBits()/8)); 02391 // Store the incremented VAList to the legalized pointer. 02392 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, 02393 VAListPtr, MachinePointerInfo(SV), false, false, 0); 02394 // Load the actual argument out of the pointer VAList. 02395 // We can't count on greater alignment than the word size. 02396 return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(), 02397 false, false, false, 02398 std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8); 02399 } 02400 02401 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, 02402 const SparcSubtarget *Subtarget) { 02403 SDValue Chain = Op.getOperand(0); // Legalize the chain. 02404 SDValue Size = Op.getOperand(1); // Legalize the size. 02405 EVT VT = Size->getValueType(0); 02406 SDLoc dl(Op); 02407 02408 unsigned SPReg = SP::O6; 02409 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 02410 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 02411 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain 02412 02413 // The resultant pointer is actually 16 words from the bottom of the stack, 02414 // to provide a register spill area. 02415 unsigned regSpillArea = Subtarget->is64Bit() ? 
128 : 96; 02416 regSpillArea += Subtarget->getStackPointerBias(); 02417 02418 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP, 02419 DAG.getConstant(regSpillArea, VT)); 02420 SDValue Ops[2] = { NewVal, Chain }; 02421 return DAG.getMergeValues(Ops, dl); 02422 } 02423 02424 02425 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { 02426 SDLoc dl(Op); 02427 SDValue Chain = DAG.getNode(SPISD::FLUSHW, 02428 dl, MVT::Other, DAG.getEntryNode()); 02429 return Chain; 02430 } 02431 02432 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, 02433 const SparcSubtarget *Subtarget) { 02434 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 02435 MFI->setFrameAddressIsTaken(true); 02436 02437 EVT VT = Op.getValueType(); 02438 SDLoc dl(Op); 02439 unsigned FrameReg = SP::I6; 02440 unsigned stackBias = Subtarget->getStackPointerBias(); 02441 02442 SDValue FrameAddr; 02443 02444 if (depth == 0) { 02445 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 02446 if (Subtarget->is64Bit()) 02447 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 02448 DAG.getIntPtrConstant(stackBias)); 02449 return FrameAddr; 02450 } 02451 02452 // flush first to make sure the windowed registers' values are in stack 02453 SDValue Chain = getFLUSHW(Op, DAG); 02454 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); 02455 02456 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56; 02457 02458 while (depth--) { 02459 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 02460 DAG.getIntPtrConstant(Offset)); 02461 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(), 02462 false, false, false, 0); 02463 } 02464 if (Subtarget->is64Bit()) 02465 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 02466 DAG.getIntPtrConstant(stackBias)); 02467 return FrameAddr; 02468 } 02469 02470 02471 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, 02472 const SparcSubtarget *Subtarget) { 02473 02474 uint64_t depth = Op.getConstantOperandVal(0); 02475 02476 return getFRAMEADDR(depth, Op, DAG, Subtarget); 02477 02478 } 02479 02480 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, 02481 const SparcTargetLowering &TLI, 02482 const SparcSubtarget *Subtarget) { 02483 MachineFunction &MF = DAG.getMachineFunction(); 02484 MachineFrameInfo *MFI = MF.getFrameInfo(); 02485 MFI->setReturnAddressIsTaken(true); 02486 02487 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG)) 02488 return SDValue(); 02489 02490 EVT VT = Op.getValueType(); 02491 SDLoc dl(Op); 02492 uint64_t depth = Op.getConstantOperandVal(0); 02493 02494 SDValue RetAddr; 02495 if (depth == 0) { 02496 unsigned RetReg = MF.addLiveIn(SP::I7, 02497 TLI.getRegClassFor(TLI.getPointerTy())); 02498 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); 02499 return RetAddr; 02500 } 02501 02502 // Need frame address to find return address of the caller. 02503 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget); 02504 02505 unsigned Offset = (Subtarget->is64Bit()) ? 
120 : 60; 02506 SDValue Ptr = DAG.getNode(ISD::ADD, 02507 dl, VT, 02508 FrameAddr, 02509 DAG.getIntPtrConstant(Offset)); 02510 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, 02511 MachinePointerInfo(), false, false, false, 0); 02512 02513 return RetAddr; 02514 } 02515 02516 static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode) 02517 { 02518 SDLoc dl(Op); 02519 02520 assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!"); 02521 assert(opcode == ISD::FNEG || opcode == ISD::FABS); 02522 02523 // Lower fneg/fabs on f64 to fneg/fabs on f32. 02524 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd. 02525 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd. 02526 02527 SDValue SrcReg64 = Op.getOperand(0); 02528 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32, 02529 SrcReg64); 02530 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32, 02531 SrcReg64); 02532 02533 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32); 02534 02535 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 02536 dl, MVT::f64), 0); 02537 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64, 02538 DstReg64, Hi32); 02539 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64, 02540 DstReg64, Lo32); 02541 return DstReg64; 02542 } 02543 02544 // Lower a f128 load into two f64 loads. 02545 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG) 02546 { 02547 SDLoc dl(Op); 02548 LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode()); 02549 assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF 02550 && "Unexpected node type"); 02551 02552 unsigned alignment = LdNode->getAlignment(); 02553 if (alignment > 8) 02554 alignment = 8; 02555 02556 SDValue Hi64 = DAG.getLoad(MVT::f64, 02557 dl, 02558 LdNode->getChain(), 02559 LdNode->getBasePtr(), 02560 LdNode->getPointerInfo(), 02561 false, false, false, alignment); 02562 EVT addrVT = LdNode->getBasePtr().getValueType(); 02563 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT, 02564 LdNode->getBasePtr(), 02565 DAG.getConstant(8, addrVT)); 02566 SDValue Lo64 = DAG.getLoad(MVT::f64, 02567 dl, 02568 LdNode->getChain(), 02569 LoPtr, 02570 LdNode->getPointerInfo(), 02571 false, false, false, alignment); 02572 02573 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32); 02574 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, MVT::i32); 02575 02576 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 02577 dl, MVT::f128); 02578 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 02579 MVT::f128, 02580 SDValue(InFP128, 0), 02581 Hi64, 02582 SubRegEven); 02583 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 02584 MVT::f128, 02585 SDValue(InFP128, 0), 02586 Lo64, 02587 SubRegOdd); 02588 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1), 02589 SDValue(Lo64.getNode(), 1) }; 02590 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 02591 SDValue Ops[2] = {SDValue(InFP128,0), OutChain}; 02592 return DAG.getMergeValues(Ops, dl); 02593 } 02594 02595 // Lower a f128 store into two f64 stores. 
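// (Illustration, using the usual SPARC register aliasing: an f128 value in
// %q0 occupies the even/odd double pair %d0/%d2, i.e. the sub_even64 and
// sub_odd64 subregisters extracted below. The store is therefore split into
// two 8-byte stores at offsets 0 and +8 from the base pointer, mirroring
// LowerF128Load() above.)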
02596 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) { 02597 SDLoc dl(Op); 02598 StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode()); 02599 assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF 02600 && "Unexpected node type"); 02601 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32); 02602 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, MVT::i32); 02603 02604 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, 02605 dl, 02606 MVT::f64, 02607 StNode->getValue(), 02608 SubRegEven); 02609 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, 02610 dl, 02611 MVT::f64, 02612 StNode->getValue(), 02613 SubRegOdd); 02614 02615 unsigned alignment = StNode->getAlignment(); 02616 if (alignment > 8) 02617 alignment = 8; 02618 02619 SDValue OutChains[2]; 02620 OutChains[0] = DAG.getStore(StNode->getChain(), 02621 dl, 02622 SDValue(Hi64, 0), 02623 StNode->getBasePtr(), 02624 MachinePointerInfo(), 02625 false, false, alignment); 02626 EVT addrVT = StNode->getBasePtr().getValueType(); 02627 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT, 02628 StNode->getBasePtr(), 02629 DAG.getConstant(8, addrVT)); 02630 OutChains[1] = DAG.getStore(StNode->getChain(), 02631 dl, 02632 SDValue(Lo64, 0), 02633 LoPtr, 02634 MachinePointerInfo(), 02635 false, false, alignment); 02636 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 02637 } 02638 02639 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { 02640 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) 02641 && "invalid opcode"); 02642 02643 if (Op.getValueType() == MVT::f64) 02644 return LowerF64Op(Op, DAG, Op.getOpcode()); 02645 if (Op.getValueType() != MVT::f128) 02646 return Op; 02647 02648 // Lower fabs/fneg on f128 to fabs/fneg on f64 02649 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64 02650 02651 SDLoc dl(Op); 02652 SDValue SrcReg128 = Op.getOperand(0); 02653 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64, 02654 SrcReg128); 02655 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64, 02656 SrcReg128); 02657 if (isV9) 02658 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64); 02659 else 02660 Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode()); 02661 02662 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 02663 dl, MVT::f128), 0); 02664 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128, 02665 DstReg128, Hi64); 02666 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128, 02667 DstReg128, Lo64); 02668 return DstReg128; 02669 } 02670 02671 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 02672 02673 if (Op.getValueType() != MVT::i64) 02674 return Op; 02675 02676 SDLoc dl(Op); 02677 SDValue Src1 = Op.getOperand(0); 02678 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1); 02679 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1, 02680 DAG.getConstant(32, MVT::i64)); 02681 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi); 02682 02683 SDValue Src2 = Op.getOperand(1); 02684 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2); 02685 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2, 02686 DAG.getConstant(32, MVT::i64)); 02687 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi); 02688 02689 02690 bool hasChain = false; 02691 unsigned hiOpc = Op.getOpcode(); 02692 switch (Op.getOpcode()) { 02693 default: llvm_unreachable("Invalid opcode"); 02694 case 
ISD::ADDC: hiOpc = ISD::ADDE; break; 02695 case ISD::ADDE: hasChain = true; break; 02696 case ISD::SUBC: hiOpc = ISD::SUBE; break; 02697 case ISD::SUBE: hasChain = true; break; 02698 } 02699 SDValue Lo; 02700 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue); 02701 if (hasChain) { 02702 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo, 02703 Op.getOperand(2)); 02704 } else { 02705 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo); 02706 } 02707 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1)); 02708 SDValue Carry = Hi.getValue(1); 02709 02710 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo); 02711 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi); 02712 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi, 02713 DAG.getConstant(32, MVT::i64)); 02714 02715 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo); 02716 SDValue Ops[2] = { Dst, Carry }; 02717 return DAG.getMergeValues(Ops, dl); 02718 } 02719 02720 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode() 02721 // in LegalizeDAG.cpp except the order of arguments to the library function. 02722 static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, 02723 const SparcTargetLowering &TLI) 02724 { 02725 unsigned opcode = Op.getOpcode(); 02726 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode."); 02727 02728 bool isSigned = (opcode == ISD::SMULO); 02729 EVT VT = MVT::i64; 02730 EVT WideVT = MVT::i128; 02731 SDLoc dl(Op); 02732 SDValue LHS = Op.getOperand(0); 02733 02734 if (LHS.getValueType() != VT) 02735 return Op; 02736 02737 SDValue ShiftAmt = DAG.getConstant(63, VT); 02738 02739 SDValue RHS = Op.getOperand(1); 02740 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt); 02741 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt); 02742 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 02743 02744 SDValue MulResult = TLI.makeLibCall(DAG, 02745 RTLIB::MUL_I128, WideVT, 02746 Args, 4, isSigned, dl).first; 02747 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 02748 MulResult, DAG.getIntPtrConstant(0)); 02749 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 02750 MulResult, DAG.getIntPtrConstant(1)); 02751 if (isSigned) { 02752 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 02753 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE); 02754 } else { 02755 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, VT), 02756 ISD::SETNE); 02757 } 02758 // MulResult is a node with an illegal type. Because such things are not 02759 // generally permitted during this phase of legalization, ensure that 02760 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have 02761 // been folded. 02762 assert(MulResult->use_empty() && "Illegally typed node still in use!"); 02763 02764 SDValue Ops[2] = { BottomHalf, TopHalf } ; 02765 return DAG.getMergeValues(Ops, dl); 02766 } 02767 02768 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) { 02769 // Monotonic load/stores are legal. 02770 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 02771 return Op; 02772 02773 // Otherwise, expand with a fence. 
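  // (Returning a null SDValue signals that custom lowering declined, so the
  // generic legalizer falls back to its default expansion for this atomic
  // load/store.)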
02774 return SDValue(); 02775 } 02776 02777 02778 SDValue SparcTargetLowering:: 02779 LowerOperation(SDValue Op, SelectionDAG &DAG) const { 02780 02781 bool hasHardQuad = Subtarget->hasHardQuad(); 02782 bool isV9 = Subtarget->isV9(); 02783 02784 switch (Op.getOpcode()) { 02785 default: llvm_unreachable("Should not custom lower this!"); 02786 02787 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this, 02788 Subtarget); 02789 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG, 02790 Subtarget); 02791 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 02792 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 02793 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 02794 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 02795 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this, 02796 hasHardQuad); 02797 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this, 02798 hasHardQuad); 02799 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this, 02800 hasHardQuad); 02801 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this, 02802 hasHardQuad); 02803 case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this, 02804 hasHardQuad); 02805 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this, 02806 hasHardQuad); 02807 case ISD::VASTART: return LowerVASTART(Op, DAG, *this); 02808 case ISD::VAARG: return LowerVAARG(Op, DAG); 02809 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, 02810 Subtarget); 02811 02812 case ISD::LOAD: return LowerF128Load(Op, DAG); 02813 case ISD::STORE: return LowerF128Store(Op, DAG); 02814 case ISD::FADD: return LowerF128Op(Op, DAG, 02815 getLibcallName(RTLIB::ADD_F128), 2); 02816 case ISD::FSUB: return LowerF128Op(Op, DAG, 02817 getLibcallName(RTLIB::SUB_F128), 2); 02818 case ISD::FMUL: return LowerF128Op(Op, DAG, 02819 getLibcallName(RTLIB::MUL_F128), 2); 02820 case ISD::FDIV: return LowerF128Op(Op, DAG, 02821 getLibcallName(RTLIB::DIV_F128), 2); 02822 case ISD::FSQRT: return LowerF128Op(Op, DAG, 02823 getLibcallName(RTLIB::SQRT_F128),1); 02824 case ISD::FABS: 02825 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9); 02826 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this); 02827 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this); 02828 case ISD::ADDC: 02829 case ISD::ADDE: 02830 case ISD::SUBC: 02831 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 02832 case ISD::UMULO: 02833 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this); 02834 case ISD::ATOMIC_LOAD: 02835 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG); 02836 } 02837 } 02838 02839 MachineBasicBlock * 02840 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 02841 MachineBasicBlock *BB) const { 02842 switch (MI->getOpcode()) { 02843 default: llvm_unreachable("Unknown SELECT_CC!"); 02844 case SP::SELECT_CC_Int_ICC: 02845 case SP::SELECT_CC_FP_ICC: 02846 case SP::SELECT_CC_DFP_ICC: 02847 case SP::SELECT_CC_QFP_ICC: 02848 return expandSelectCC(MI, BB, SP::BCOND); 02849 case SP::SELECT_CC_Int_FCC: 02850 case SP::SELECT_CC_FP_FCC: 02851 case SP::SELECT_CC_DFP_FCC: 02852 case SP::SELECT_CC_QFP_FCC: 02853 return expandSelectCC(MI, BB, SP::FBCOND); 02854 02855 case SP::ATOMIC_LOAD_ADD_32: 02856 return expandAtomicRMW(MI, BB, SP::ADDrr); 02857 case SP::ATOMIC_LOAD_ADD_64: 02858 return expandAtomicRMW(MI, BB, SP::ADDXrr); 02859 case SP::ATOMIC_LOAD_SUB_32: 02860 return expandAtomicRMW(MI, BB, SP::SUBrr); 02861 case SP::ATOMIC_LOAD_SUB_64: 02862 return expandAtomicRMW(MI, BB, 
SP::SUBXrr); 02863 case SP::ATOMIC_LOAD_AND_32: 02864 return expandAtomicRMW(MI, BB, SP::ANDrr); 02865 case SP::ATOMIC_LOAD_AND_64: 02866 return expandAtomicRMW(MI, BB, SP::ANDXrr); 02867 case SP::ATOMIC_LOAD_OR_32: 02868 return expandAtomicRMW(MI, BB, SP::ORrr); 02869 case SP::ATOMIC_LOAD_OR_64: 02870 return expandAtomicRMW(MI, BB, SP::ORXrr); 02871 case SP::ATOMIC_LOAD_XOR_32: 02872 return expandAtomicRMW(MI, BB, SP::XORrr); 02873 case SP::ATOMIC_LOAD_XOR_64: 02874 return expandAtomicRMW(MI, BB, SP::XORXrr); 02875 case SP::ATOMIC_LOAD_NAND_32: 02876 return expandAtomicRMW(MI, BB, SP::ANDrr); 02877 case SP::ATOMIC_LOAD_NAND_64: 02878 return expandAtomicRMW(MI, BB, SP::ANDXrr); 02879 02880 case SP::ATOMIC_SWAP_64: 02881 return expandAtomicRMW(MI, BB, 0); 02882 02883 case SP::ATOMIC_LOAD_MAX_32: 02884 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G); 02885 case SP::ATOMIC_LOAD_MAX_64: 02886 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G); 02887 case SP::ATOMIC_LOAD_MIN_32: 02888 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE); 02889 case SP::ATOMIC_LOAD_MIN_64: 02890 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE); 02891 case SP::ATOMIC_LOAD_UMAX_32: 02892 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU); 02893 case SP::ATOMIC_LOAD_UMAX_64: 02894 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU); 02895 case SP::ATOMIC_LOAD_UMIN_32: 02896 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU); 02897 case SP::ATOMIC_LOAD_UMIN_64: 02898 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU); 02899 } 02900 } 02901 02902 MachineBasicBlock* 02903 SparcTargetLowering::expandSelectCC(MachineInstr *MI, 02904 MachineBasicBlock *BB, 02905 unsigned BROpcode) const { 02906 const TargetInstrInfo &TII = 02907 *getTargetMachine().getSubtargetImpl()->getInstrInfo(); 02908 DebugLoc dl = MI->getDebugLoc(); 02909 unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm(); 02910 02911 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 02912 // control-flow pattern. The incoming instruction knows the destination vreg 02913 // to set, the condition code register to branch on, the true/false values to 02914 // select between, and a branch opcode to use. 02915 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 02916 MachineFunction::iterator It = BB; 02917 ++It; 02918 02919 // thisMBB: 02920 // ... 02921 // TrueVal = ... 02922 // [f]bCC copy1MBB 02923 // fallthrough --> copy0MBB 02924 MachineBasicBlock *thisMBB = BB; 02925 MachineFunction *F = BB->getParent(); 02926 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 02927 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 02928 F->insert(It, copy0MBB); 02929 F->insert(It, sinkMBB); 02930 02931 // Transfer the remainder of BB and its successor edges to sinkMBB. 02932 sinkMBB->splice(sinkMBB->begin(), BB, 02933 std::next(MachineBasicBlock::iterator(MI)), 02934 BB->end()); 02935 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 02936 02937 // Add the true and fallthrough blocks as its successors. 02938 BB->addSuccessor(copy0MBB); 02939 BB->addSuccessor(sinkMBB); 02940 02941 BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC); 02942 02943 // copy0MBB: 02944 // %FalseValue = ... 02945 // # fallthrough to sinkMBB 02946 BB = copy0MBB; 02947 02948 // Update machine-CFG edges 02949 BB->addSuccessor(sinkMBB); 02950 02951 // sinkMBB: 02952 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 02953 // ... 
MachineBasicBlock*
SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
                                     MachineBasicBlock *MBB,
                                     unsigned Opcode,
                                     unsigned CondCode) const {
  const TargetInstrInfo &TII =
    *getTargetMachine().getSubtargetImpl()->getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  // MI is an atomic read-modify-write instruction of the form:
  //
  //   rd = atomicrmw<op> addr, rs2
  //
  // All three operands are registers.
  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned AddrReg = MI->getOperand(1).getReg();
  unsigned Rs2Reg  = MI->getOperand(2).getReg();

  // SelectionDAG has already inserted memory barriers before and after MI, so
  // we simply have to implement the operation in terms of compare-and-swap.
  //
  //   %val0 = load %addr
  //   loop:
  //     %val = phi %val0, %dest
  //     %upd = op %val, %rs2
  //     %dest = cas %addr, %val, %upd
  //     cmp %val, %dest
  //     bne loop
  //   done:
  //
  bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
  const TargetRegisterClass *ValueRC =
    is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);

  BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
    .addReg(AddrReg).addImm(0);

  // Split the basic block MBB before MI and insert the loop block in the hole.
  MachineFunction::iterator MFI = MBB;
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  ++MFI;
  MF->insert(MFI, LoopMBB);
  MF->insert(MFI, DoneMBB);

  // Move MI and following instructions to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Connect the CFG again.
  MBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  // Build the loop block.
  unsigned ValReg = MRI.createVirtualRegister(ValueRC);
  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);

  BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
    .addReg(Val0Reg).addMBB(MBB)
    .addReg(DestReg).addMBB(LoopMBB);

  if (CondCode) {
    // This is one of the min/max operations. We need a CMPrr followed by a
    // MOVXCC/MOVICC.
    BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
      .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
  } else if (Opcode) {
    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
      .addReg(ValReg).addReg(Rs2Reg);
  }

  if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
      MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
    unsigned TmpReg = UpdReg;
    UpdReg = MRI.createVirtualRegister(ValueRC);
    BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
  }

  BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
    .addReg(AddrReg).addReg(ValReg).addReg(UpdReg)
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
  BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
    .addMBB(LoopMBB).addImm(SPCC::ICC_NE);

  MI->eraseFromParent();
  return DoneMBB;
}
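
// Two special cases of the loop above, for reference:
//  - ATOMIC_SWAP_64 passes Opcode == 0, so no update instruction is emitted
//    and the CAS tries to write %rs2 directly.
//  - ATOMIC_LOAD_NAND_* reuses the AND opcode and then inverts the result
//    with "xor %upd, -1" before the CAS, giving nand = ~(val & rs2).
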
//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'I': // SIMM13
      return C_Other;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result(nullptr, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
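
// Illustrative example (hypothetical IR, not taken from this file) of what
// the constraint handling in this section accepts:
//
//   %sum = call i32 asm "add $1, $2, $0", "=r,r,I"(i32 %a, i32 13)
//
// binds %a to an integer register ('r' -> IntRegs) and keeps 13 as an
// immediate because it fits the signed 13-bit 'I' (SIMM13) field.  Explicit
// register constraints written with numeric aliases are rewritten by
// getRegForInlineAsmConstraint below, e.g. "{r10}" becomes "{o2}" since
// r8-r15 map onto %o0-%o7.
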
std::pair<unsigned, const TargetRegisterClass*>
SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
             && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(newConstraint, VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;

  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  }
}
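
// For reference, a sketch of the i64 <-> f128 legalization handled above
// (the IR is illustrative): a node such as "fptosi fp128 %x to i64" reaching
// custom type legalization is replaced with a call to the FPTOSINT_F128_I64
// runtime routine via LowerF128Op, and the reverse direction (i64 -> f128)
// goes through SINTTOFP_I64_F128 / UINTTOFP_I64_F128 in the same way.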