LLVM API Documentation
00001 //===- GlobalOpt.cpp - Optimize Global Variables --------------------------===// 00002 // 00003 // The LLVM Compiler Infrastructure 00004 // 00005 // This file is distributed under the University of Illinois Open Source 00006 // License. See LICENSE.TXT for details. 00007 // 00008 //===----------------------------------------------------------------------===// 00009 // 00010 // This pass transforms simple global variables that never have their address 00011 // taken. If obviously true, it marks read/write globals as constant, deletes 00012 // variables only stored to, etc. 00013 // 00014 //===----------------------------------------------------------------------===// 00015 00016 #include "llvm/Transforms/IPO.h" 00017 #include "llvm/ADT/DenseMap.h" 00018 #include "llvm/ADT/STLExtras.h" 00019 #include "llvm/ADT/SmallPtrSet.h" 00020 #include "llvm/ADT/SmallSet.h" 00021 #include "llvm/ADT/SmallVector.h" 00022 #include "llvm/ADT/Statistic.h" 00023 #include "llvm/Analysis/ConstantFolding.h" 00024 #include "llvm/Analysis/MemoryBuiltins.h" 00025 #include "llvm/IR/CallSite.h" 00026 #include "llvm/IR/CallingConv.h" 00027 #include "llvm/IR/Constants.h" 00028 #include "llvm/IR/DataLayout.h" 00029 #include "llvm/IR/DerivedTypes.h" 00030 #include "llvm/IR/GetElementPtrTypeIterator.h" 00031 #include "llvm/IR/Instructions.h" 00032 #include "llvm/IR/IntrinsicInst.h" 00033 #include "llvm/IR/Module.h" 00034 #include "llvm/IR/Operator.h" 00035 #include "llvm/IR/ValueHandle.h" 00036 #include "llvm/Pass.h" 00037 #include "llvm/Support/Debug.h" 00038 #include "llvm/Support/ErrorHandling.h" 00039 #include "llvm/Support/MathExtras.h" 00040 #include "llvm/Support/raw_ostream.h" 00041 #include "llvm/Target/TargetLibraryInfo.h" 00042 #include "llvm/Transforms/Utils/CtorUtils.h" 00043 #include "llvm/Transforms/Utils/GlobalStatus.h" 00044 #include "llvm/Transforms/Utils/ModuleUtils.h" 00045 #include <algorithm> 00046 #include <deque> 00047 using namespace llvm; 00048 00049 #define DEBUG_TYPE "globalopt" 00050 00051 STATISTIC(NumMarked , "Number of globals marked constant"); 00052 STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr"); 00053 STATISTIC(NumSRA , "Number of aggregate globals broken into scalars"); 00054 STATISTIC(NumHeapSRA , "Number of heap objects SRA'd"); 00055 STATISTIC(NumSubstitute,"Number of globals with initializers stored into them"); 00056 STATISTIC(NumDeleted , "Number of globals deleted"); 00057 STATISTIC(NumFnDeleted , "Number of functions deleted"); 00058 STATISTIC(NumGlobUses , "Number of global uses devirtualized"); 00059 STATISTIC(NumLocalized , "Number of globals localized"); 00060 STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans"); 00061 STATISTIC(NumFastCallFns , "Number of functions converted to fastcc"); 00062 STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated"); 00063 STATISTIC(NumNestRemoved , "Number of nest attributes removed"); 00064 STATISTIC(NumAliasesResolved, "Number of global aliases resolved"); 00065 STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated"); 00066 STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed"); 00067 00068 namespace { 00069 struct GlobalOpt : public ModulePass { 00070 void getAnalysisUsage(AnalysisUsage &AU) const override { 00071 AU.addRequired<TargetLibraryInfo>(); 00072 } 00073 static char ID; // Pass identification, replacement for typeid 00074 GlobalOpt() : ModulePass(ID) { 00075 initializeGlobalOptPass(*PassRegistry::getPassRegistry()); 00076 } 00077 00078 bool runOnModule(Module &M) override; 00079 00080 private: 00081 bool OptimizeFunctions(Module &M); 00082 bool OptimizeGlobalVars(Module &M); 00083 bool OptimizeGlobalAliases(Module &M); 00084 bool ProcessGlobal(GlobalVariable *GV,Module::global_iterator &GVI); 00085 bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI, 00086 const GlobalStatus &GS); 00087 bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn); 00088 00089 const DataLayout *DL; 00090 TargetLibraryInfo *TLI; 00091 }; 00092 } 00093 00094 char GlobalOpt::ID = 0; 00095 INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt", 00096 "Global Variable Optimizer", false, false) 00097 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo) 00098 INITIALIZE_PASS_END(GlobalOpt, "globalopt", 00099 "Global Variable Optimizer", false, false) 00100 00101 ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); } 00102 00103 /// isLeakCheckerRoot - Is this global variable possibly used by a leak checker 00104 /// as a root? If so, we might not really want to eliminate the stores to it. 00105 static bool isLeakCheckerRoot(GlobalVariable *GV) { 00106 // A global variable is a root if it is a pointer, or could plausibly contain 00107 // a pointer. There are two challenges; one is that we could have a struct 00108 // the has an inner member which is a pointer. We recurse through the type to 00109 // detect these (up to a point). The other is that we may actually be a union 00110 // of a pointer and another type, and so our LLVM type is an integer which 00111 // gets converted into a pointer, or our type is an [i8 x #] with a pointer 00112 // potentially contained here. 00113 00114 if (GV->hasPrivateLinkage()) 00115 return false; 00116 00117 SmallVector<Type *, 4> Types; 00118 Types.push_back(cast<PointerType>(GV->getType())->getElementType()); 00119 00120 unsigned Limit = 20; 00121 do { 00122 Type *Ty = Types.pop_back_val(); 00123 switch (Ty->getTypeID()) { 00124 default: break; 00125 case Type::PointerTyID: return true; 00126 case Type::ArrayTyID: 00127 case Type::VectorTyID: { 00128 SequentialType *STy = cast<SequentialType>(Ty); 00129 Types.push_back(STy->getElementType()); 00130 break; 00131 } 00132 case Type::StructTyID: { 00133 StructType *STy = cast<StructType>(Ty); 00134 if (STy->isOpaque()) return true; 00135 for (StructType::element_iterator I = STy->element_begin(), 00136 E = STy->element_end(); I != E; ++I) { 00137 Type *InnerTy = *I; 00138 if (isa<PointerType>(InnerTy)) return true; 00139 if (isa<CompositeType>(InnerTy)) 00140 Types.push_back(InnerTy); 00141 } 00142 break; 00143 } 00144 } 00145 if (--Limit == 0) return true; 00146 } while (!Types.empty()); 00147 return false; 00148 } 00149 00150 /// Given a value that is stored to a global but never read, determine whether 00151 /// it's safe to remove the store and the chain of computation that feeds the 00152 /// store. 00153 static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) { 00154 do { 00155 if (isa<Constant>(V)) 00156 return true; 00157 if (!V->hasOneUse()) 00158 return false; 00159 if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) || 00160 isa<GlobalValue>(V)) 00161 return false; 00162 if (isAllocationFn(V, TLI)) 00163 return true; 00164 00165 Instruction *I = cast<Instruction>(V); 00166 if (I->mayHaveSideEffects()) 00167 return false; 00168 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 00169 if (!GEP->hasAllConstantIndices()) 00170 return false; 00171 } else if (I->getNumOperands() != 1) { 00172 return false; 00173 } 00174 00175 V = I->getOperand(0); 00176 } while (1); 00177 } 00178 00179 /// CleanupPointerRootUsers - This GV is a pointer root. Loop over all users 00180 /// of the global and clean up any that obviously don't assign the global a 00181 /// value that isn't dynamically allocated. 00182 /// 00183 static bool CleanupPointerRootUsers(GlobalVariable *GV, 00184 const TargetLibraryInfo *TLI) { 00185 // A brief explanation of leak checkers. The goal is to find bugs where 00186 // pointers are forgotten, causing an accumulating growth in memory 00187 // usage over time. The common strategy for leak checkers is to whitelist the 00188 // memory pointed to by globals at exit. This is popular because it also 00189 // solves another problem where the main thread of a C++ program may shut down 00190 // before other threads that are still expecting to use those globals. To 00191 // handle that case, we expect the program may create a singleton and never 00192 // destroy it. 00193 00194 bool Changed = false; 00195 00196 // If Dead[n].first is the only use of a malloc result, we can delete its 00197 // chain of computation and the store to the global in Dead[n].second. 00198 SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead; 00199 00200 // Constants can't be pointers to dynamically allocated memory. 00201 for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end(); 00202 UI != E;) { 00203 User *U = *UI++; 00204 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 00205 Value *V = SI->getValueOperand(); 00206 if (isa<Constant>(V)) { 00207 Changed = true; 00208 SI->eraseFromParent(); 00209 } else if (Instruction *I = dyn_cast<Instruction>(V)) { 00210 if (I->hasOneUse()) 00211 Dead.push_back(std::make_pair(I, SI)); 00212 } 00213 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) { 00214 if (isa<Constant>(MSI->getValue())) { 00215 Changed = true; 00216 MSI->eraseFromParent(); 00217 } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) { 00218 if (I->hasOneUse()) 00219 Dead.push_back(std::make_pair(I, MSI)); 00220 } 00221 } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) { 00222 GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource()); 00223 if (MemSrc && MemSrc->isConstant()) { 00224 Changed = true; 00225 MTI->eraseFromParent(); 00226 } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) { 00227 if (I->hasOneUse()) 00228 Dead.push_back(std::make_pair(I, MTI)); 00229 } 00230 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 00231 if (CE->use_empty()) { 00232 CE->destroyConstant(); 00233 Changed = true; 00234 } 00235 } else if (Constant *C = dyn_cast<Constant>(U)) { 00236 if (isSafeToDestroyConstant(C)) { 00237 C->destroyConstant(); 00238 // This could have invalidated UI, start over from scratch. 00239 Dead.clear(); 00240 CleanupPointerRootUsers(GV, TLI); 00241 return true; 00242 } 00243 } 00244 } 00245 00246 for (int i = 0, e = Dead.size(); i != e; ++i) { 00247 if (IsSafeComputationToRemove(Dead[i].first, TLI)) { 00248 Dead[i].second->eraseFromParent(); 00249 Instruction *I = Dead[i].first; 00250 do { 00251 if (isAllocationFn(I, TLI)) 00252 break; 00253 Instruction *J = dyn_cast<Instruction>(I->getOperand(0)); 00254 if (!J) 00255 break; 00256 I->eraseFromParent(); 00257 I = J; 00258 } while (1); 00259 I->eraseFromParent(); 00260 } 00261 } 00262 00263 return Changed; 00264 } 00265 00266 /// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all 00267 /// users of the global, cleaning up the obvious ones. This is largely just a 00268 /// quick scan over the use list to clean up the easy and obvious cruft. This 00269 /// returns true if it made a change. 00270 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, 00271 const DataLayout *DL, 00272 TargetLibraryInfo *TLI) { 00273 bool Changed = false; 00274 // Note that we need to use a weak value handle for the worklist items. When 00275 // we delete a constant array, we may also be holding pointer to one of its 00276 // elements (or an element of one of its elements if we're dealing with an 00277 // array of arrays) in the worklist. 00278 SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end()); 00279 while (!WorkList.empty()) { 00280 Value *UV = WorkList.pop_back_val(); 00281 if (!UV) 00282 continue; 00283 00284 User *U = cast<User>(UV); 00285 00286 if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 00287 if (Init) { 00288 // Replace the load with the initializer. 00289 LI->replaceAllUsesWith(Init); 00290 LI->eraseFromParent(); 00291 Changed = true; 00292 } 00293 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 00294 // Store must be unreachable or storing Init into the global. 00295 SI->eraseFromParent(); 00296 Changed = true; 00297 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 00298 if (CE->getOpcode() == Instruction::GetElementPtr) { 00299 Constant *SubInit = nullptr; 00300 if (Init) 00301 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); 00302 Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI); 00303 } else if ((CE->getOpcode() == Instruction::BitCast && 00304 CE->getType()->isPointerTy()) || 00305 CE->getOpcode() == Instruction::AddrSpaceCast) { 00306 // Pointer cast, delete any stores and memsets to the global. 00307 Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI); 00308 } 00309 00310 if (CE->use_empty()) { 00311 CE->destroyConstant(); 00312 Changed = true; 00313 } 00314 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { 00315 // Do not transform "gepinst (gep constexpr (GV))" here, because forming 00316 // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold 00317 // and will invalidate our notion of what Init is. 00318 Constant *SubInit = nullptr; 00319 if (!isa<ConstantExpr>(GEP->getOperand(0))) { 00320 ConstantExpr *CE = 00321 dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI)); 00322 if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr) 00323 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); 00324 00325 // If the initializer is an all-null value and we have an inbounds GEP, 00326 // we already know what the result of any load from that GEP is. 00327 // TODO: Handle splats. 00328 if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds()) 00329 SubInit = Constant::getNullValue(GEP->getType()->getElementType()); 00330 } 00331 Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI); 00332 00333 if (GEP->use_empty()) { 00334 GEP->eraseFromParent(); 00335 Changed = true; 00336 } 00337 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv 00338 if (MI->getRawDest() == V) { 00339 MI->eraseFromParent(); 00340 Changed = true; 00341 } 00342 00343 } else if (Constant *C = dyn_cast<Constant>(U)) { 00344 // If we have a chain of dead constantexprs or other things dangling from 00345 // us, and if they are all dead, nuke them without remorse. 00346 if (isSafeToDestroyConstant(C)) { 00347 C->destroyConstant(); 00348 CleanupConstantGlobalUsers(V, Init, DL, TLI); 00349 return true; 00350 } 00351 } 00352 } 00353 return Changed; 00354 } 00355 00356 /// isSafeSROAElementUse - Return true if the specified instruction is a safe 00357 /// user of a derived expression from a global that we want to SROA. 00358 static bool isSafeSROAElementUse(Value *V) { 00359 // We might have a dead and dangling constant hanging off of here. 00360 if (Constant *C = dyn_cast<Constant>(V)) 00361 return isSafeToDestroyConstant(C); 00362 00363 Instruction *I = dyn_cast<Instruction>(V); 00364 if (!I) return false; 00365 00366 // Loads are ok. 00367 if (isa<LoadInst>(I)) return true; 00368 00369 // Stores *to* the pointer are ok. 00370 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 00371 return SI->getOperand(0) != V; 00372 00373 // Otherwise, it must be a GEP. 00374 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I); 00375 if (!GEPI) return false; 00376 00377 if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) || 00378 !cast<Constant>(GEPI->getOperand(1))->isNullValue()) 00379 return false; 00380 00381 for (User *U : GEPI->users()) 00382 if (!isSafeSROAElementUse(U)) 00383 return false; 00384 return true; 00385 } 00386 00387 00388 /// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value. 00389 /// Look at it and its uses and decide whether it is safe to SROA this global. 00390 /// 00391 static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) { 00392 // The user of the global must be a GEP Inst or a ConstantExpr GEP. 00393 if (!isa<GetElementPtrInst>(U) && 00394 (!isa<ConstantExpr>(U) || 00395 cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr)) 00396 return false; 00397 00398 // Check to see if this ConstantExpr GEP is SRA'able. In particular, we 00399 // don't like < 3 operand CE's, and we don't like non-constant integer 00400 // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some 00401 // value of C. 00402 if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) || 00403 !cast<Constant>(U->getOperand(1))->isNullValue() || 00404 !isa<ConstantInt>(U->getOperand(2))) 00405 return false; 00406 00407 gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U); 00408 ++GEPI; // Skip over the pointer index. 00409 00410 // If this is a use of an array allocation, do a bit more checking for sanity. 00411 if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) { 00412 uint64_t NumElements = AT->getNumElements(); 00413 ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2)); 00414 00415 // Check to make sure that index falls within the array. If not, 00416 // something funny is going on, so we won't do the optimization. 00417 // 00418 if (Idx->getZExtValue() >= NumElements) 00419 return false; 00420 00421 // We cannot scalar repl this level of the array unless any array 00422 // sub-indices are in-range constants. In particular, consider: 00423 // A[0][i]. We cannot know that the user isn't doing invalid things like 00424 // allowing i to index an out-of-range subscript that accesses A[1]. 00425 // 00426 // Scalar replacing *just* the outer index of the array is probably not 00427 // going to be a win anyway, so just give up. 00428 for (++GEPI; // Skip array index. 00429 GEPI != E; 00430 ++GEPI) { 00431 uint64_t NumElements; 00432 if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI)) 00433 NumElements = SubArrayTy->getNumElements(); 00434 else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI)) 00435 NumElements = SubVectorTy->getNumElements(); 00436 else { 00437 assert((*GEPI)->isStructTy() && 00438 "Indexed GEP type is not array, vector, or struct!"); 00439 continue; 00440 } 00441 00442 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand()); 00443 if (!IdxVal || IdxVal->getZExtValue() >= NumElements) 00444 return false; 00445 } 00446 } 00447 00448 for (User *UU : U->users()) 00449 if (!isSafeSROAElementUse(UU)) 00450 return false; 00451 00452 return true; 00453 } 00454 00455 /// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it 00456 /// is safe for us to perform this transformation. 00457 /// 00458 static bool GlobalUsersSafeToSRA(GlobalValue *GV) { 00459 for (User *U : GV->users()) 00460 if (!IsUserOfGlobalSafeForSRA(U, GV)) 00461 return false; 00462 00463 return true; 00464 } 00465 00466 00467 /// SRAGlobal - Perform scalar replacement of aggregates on the specified global 00468 /// variable. This opens the door for other optimizations by exposing the 00469 /// behavior of the program in a more fine-grained way. We have determined that 00470 /// this transformation is safe already. We return the first global variable we 00471 /// insert so that the caller can reprocess it. 00472 static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) { 00473 // Make sure this global only has simple uses that we can SRA. 00474 if (!GlobalUsersSafeToSRA(GV)) 00475 return nullptr; 00476 00477 assert(GV->hasLocalLinkage() && !GV->isConstant()); 00478 Constant *Init = GV->getInitializer(); 00479 Type *Ty = Init->getType(); 00480 00481 std::vector<GlobalVariable*> NewGlobals; 00482 Module::GlobalListType &Globals = GV->getParent()->getGlobalList(); 00483 00484 // Get the alignment of the global, either explicit or target-specific. 00485 unsigned StartAlignment = GV->getAlignment(); 00486 if (StartAlignment == 0) 00487 StartAlignment = DL.getABITypeAlignment(GV->getType()); 00488 00489 if (StructType *STy = dyn_cast<StructType>(Ty)) { 00490 NewGlobals.reserve(STy->getNumElements()); 00491 const StructLayout &Layout = *DL.getStructLayout(STy); 00492 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 00493 Constant *In = Init->getAggregateElement(i); 00494 assert(In && "Couldn't get element of initializer?"); 00495 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false, 00496 GlobalVariable::InternalLinkage, 00497 In, GV->getName()+"."+Twine(i), 00498 GV->getThreadLocalMode(), 00499 GV->getType()->getAddressSpace()); 00500 Globals.insert(GV, NGV); 00501 NewGlobals.push_back(NGV); 00502 00503 // Calculate the known alignment of the field. If the original aggregate 00504 // had 256 byte alignment for example, something might depend on that: 00505 // propagate info to each field. 00506 uint64_t FieldOffset = Layout.getElementOffset(i); 00507 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset); 00508 if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i))) 00509 NGV->setAlignment(NewAlign); 00510 } 00511 } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) { 00512 unsigned NumElements = 0; 00513 if (ArrayType *ATy = dyn_cast<ArrayType>(STy)) 00514 NumElements = ATy->getNumElements(); 00515 else 00516 NumElements = cast<VectorType>(STy)->getNumElements(); 00517 00518 if (NumElements > 16 && GV->hasNUsesOrMore(16)) 00519 return nullptr; // It's not worth it. 00520 NewGlobals.reserve(NumElements); 00521 00522 uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType()); 00523 unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType()); 00524 for (unsigned i = 0, e = NumElements; i != e; ++i) { 00525 Constant *In = Init->getAggregateElement(i); 00526 assert(In && "Couldn't get element of initializer?"); 00527 00528 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false, 00529 GlobalVariable::InternalLinkage, 00530 In, GV->getName()+"."+Twine(i), 00531 GV->getThreadLocalMode(), 00532 GV->getType()->getAddressSpace()); 00533 Globals.insert(GV, NGV); 00534 NewGlobals.push_back(NGV); 00535 00536 // Calculate the known alignment of the field. If the original aggregate 00537 // had 256 byte alignment for example, something might depend on that: 00538 // propagate info to each field. 00539 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i); 00540 if (NewAlign > EltAlign) 00541 NGV->setAlignment(NewAlign); 00542 } 00543 } 00544 00545 if (NewGlobals.empty()) 00546 return nullptr; 00547 00548 DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV); 00549 00550 Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext())); 00551 00552 // Loop over all of the uses of the global, replacing the constantexpr geps, 00553 // with smaller constantexpr geps or direct references. 00554 while (!GV->use_empty()) { 00555 User *GEP = GV->user_back(); 00556 assert(((isa<ConstantExpr>(GEP) && 00557 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)|| 00558 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!"); 00559 00560 // Ignore the 1th operand, which has to be zero or else the program is quite 00561 // broken (undefined). Get the 2nd operand, which is the structure or array 00562 // index. 00563 unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 00564 if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access. 00565 00566 Value *NewPtr = NewGlobals[Val]; 00567 00568 // Form a shorter GEP if needed. 00569 if (GEP->getNumOperands() > 3) { 00570 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) { 00571 SmallVector<Constant*, 8> Idxs; 00572 Idxs.push_back(NullInt); 00573 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i) 00574 Idxs.push_back(CE->getOperand(i)); 00575 NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs); 00576 } else { 00577 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP); 00578 SmallVector<Value*, 8> Idxs; 00579 Idxs.push_back(NullInt); 00580 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) 00581 Idxs.push_back(GEPI->getOperand(i)); 00582 NewPtr = GetElementPtrInst::Create(NewPtr, Idxs, 00583 GEPI->getName()+"."+Twine(Val),GEPI); 00584 } 00585 } 00586 GEP->replaceAllUsesWith(NewPtr); 00587 00588 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP)) 00589 GEPI->eraseFromParent(); 00590 else 00591 cast<ConstantExpr>(GEP)->destroyConstant(); 00592 } 00593 00594 // Delete the old global, now that it is dead. 00595 Globals.erase(GV); 00596 ++NumSRA; 00597 00598 // Loop over the new globals array deleting any globals that are obviously 00599 // dead. This can arise due to scalarization of a structure or an array that 00600 // has elements that are dead. 00601 unsigned FirstGlobal = 0; 00602 for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i) 00603 if (NewGlobals[i]->use_empty()) { 00604 Globals.erase(NewGlobals[i]); 00605 if (FirstGlobal == i) ++FirstGlobal; 00606 } 00607 00608 return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr; 00609 } 00610 00611 /// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified 00612 /// value will trap if the value is dynamically null. PHIs keeps track of any 00613 /// phi nodes we've seen to avoid reprocessing them. 00614 static bool AllUsesOfValueWillTrapIfNull(const Value *V, 00615 SmallPtrSetImpl<const PHINode*> &PHIs) { 00616 for (const User *U : V->users()) 00617 if (isa<LoadInst>(U)) { 00618 // Will trap. 00619 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { 00620 if (SI->getOperand(0) == V) { 00621 //cerr << "NONTRAPPING USE: " << *U; 00622 return false; // Storing the value. 00623 } 00624 } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { 00625 if (CI->getCalledValue() != V) { 00626 //cerr << "NONTRAPPING USE: " << *U; 00627 return false; // Not calling the ptr 00628 } 00629 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) { 00630 if (II->getCalledValue() != V) { 00631 //cerr << "NONTRAPPING USE: " << *U; 00632 return false; // Not calling the ptr 00633 } 00634 } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) { 00635 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false; 00636 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 00637 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false; 00638 } else if (const PHINode *PN = dyn_cast<PHINode>(U)) { 00639 // If we've already seen this phi node, ignore it, it has already been 00640 // checked. 00641 if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs)) 00642 return false; 00643 } else if (isa<ICmpInst>(U) && 00644 isa<ConstantPointerNull>(U->getOperand(1))) { 00645 // Ignore icmp X, null 00646 } else { 00647 //cerr << "NONTRAPPING USE: " << *U; 00648 return false; 00649 } 00650 00651 return true; 00652 } 00653 00654 /// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads 00655 /// from GV will trap if the loaded value is null. Note that this also permits 00656 /// comparisons of the loaded value against null, as a special case. 00657 static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) { 00658 for (const User *U : GV->users()) 00659 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { 00660 SmallPtrSet<const PHINode*, 8> PHIs; 00661 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs)) 00662 return false; 00663 } else if (isa<StoreInst>(U)) { 00664 // Ignore stores to the global. 00665 } else { 00666 // We don't know or understand this user, bail out. 00667 //cerr << "UNKNOWN USER OF GLOBAL!: " << *U; 00668 return false; 00669 } 00670 return true; 00671 } 00672 00673 static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { 00674 bool Changed = false; 00675 for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) { 00676 Instruction *I = cast<Instruction>(*UI++); 00677 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 00678 LI->setOperand(0, NewV); 00679 Changed = true; 00680 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 00681 if (SI->getOperand(1) == V) { 00682 SI->setOperand(1, NewV); 00683 Changed = true; 00684 } 00685 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 00686 CallSite CS(I); 00687 if (CS.getCalledValue() == V) { 00688 // Calling through the pointer! Turn into a direct call, but be careful 00689 // that the pointer is not also being passed as an argument. 00690 CS.setCalledFunction(NewV); 00691 Changed = true; 00692 bool PassedAsArg = false; 00693 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) 00694 if (CS.getArgument(i) == V) { 00695 PassedAsArg = true; 00696 CS.setArgument(i, NewV); 00697 } 00698 00699 if (PassedAsArg) { 00700 // Being passed as an argument also. Be careful to not invalidate UI! 00701 UI = V->user_begin(); 00702 } 00703 } 00704 } else if (CastInst *CI = dyn_cast<CastInst>(I)) { 00705 Changed |= OptimizeAwayTrappingUsesOfValue(CI, 00706 ConstantExpr::getCast(CI->getOpcode(), 00707 NewV, CI->getType())); 00708 if (CI->use_empty()) { 00709 Changed = true; 00710 CI->eraseFromParent(); 00711 } 00712 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 00713 // Should handle GEP here. 00714 SmallVector<Constant*, 8> Idxs; 00715 Idxs.reserve(GEPI->getNumOperands()-1); 00716 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end(); 00717 i != e; ++i) 00718 if (Constant *C = dyn_cast<Constant>(*i)) 00719 Idxs.push_back(C); 00720 else 00721 break; 00722 if (Idxs.size() == GEPI->getNumOperands()-1) 00723 Changed |= OptimizeAwayTrappingUsesOfValue(GEPI, 00724 ConstantExpr::getGetElementPtr(NewV, Idxs)); 00725 if (GEPI->use_empty()) { 00726 Changed = true; 00727 GEPI->eraseFromParent(); 00728 } 00729 } 00730 } 00731 00732 return Changed; 00733 } 00734 00735 00736 /// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null 00737 /// value stored into it. If there are uses of the loaded value that would trap 00738 /// if the loaded value is dynamically null, then we know that they cannot be 00739 /// reachable with a null optimize away the load. 00740 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, 00741 const DataLayout *DL, 00742 TargetLibraryInfo *TLI) { 00743 bool Changed = false; 00744 00745 // Keep track of whether we are able to remove all the uses of the global 00746 // other than the store that defines it. 00747 bool AllNonStoreUsesGone = true; 00748 00749 // Replace all uses of loads with uses of uses of the stored value. 00750 for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){ 00751 User *GlobalUser = *GUI++; 00752 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) { 00753 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV); 00754 // If we were able to delete all uses of the loads 00755 if (LI->use_empty()) { 00756 LI->eraseFromParent(); 00757 Changed = true; 00758 } else { 00759 AllNonStoreUsesGone = false; 00760 } 00761 } else if (isa<StoreInst>(GlobalUser)) { 00762 // Ignore the store that stores "LV" to the global. 00763 assert(GlobalUser->getOperand(1) == GV && 00764 "Must be storing *to* the global"); 00765 } else { 00766 AllNonStoreUsesGone = false; 00767 00768 // If we get here we could have other crazy uses that are transitively 00769 // loaded. 00770 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) || 00771 isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) || 00772 isa<BitCastInst>(GlobalUser) || 00773 isa<GetElementPtrInst>(GlobalUser)) && 00774 "Only expect load and stores!"); 00775 } 00776 } 00777 00778 if (Changed) { 00779 DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV); 00780 ++NumGlobUses; 00781 } 00782 00783 // If we nuked all of the loads, then none of the stores are needed either, 00784 // nor is the global. 00785 if (AllNonStoreUsesGone) { 00786 if (isLeakCheckerRoot(GV)) { 00787 Changed |= CleanupPointerRootUsers(GV, TLI); 00788 } else { 00789 Changed = true; 00790 CleanupConstantGlobalUsers(GV, nullptr, DL, TLI); 00791 } 00792 if (GV->use_empty()) { 00793 DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); 00794 Changed = true; 00795 GV->eraseFromParent(); 00796 ++NumDeleted; 00797 } 00798 } 00799 return Changed; 00800 } 00801 00802 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the 00803 /// instructions that are foldable. 00804 static void ConstantPropUsersOf(Value *V, const DataLayout *DL, 00805 TargetLibraryInfo *TLI) { 00806 for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; ) 00807 if (Instruction *I = dyn_cast<Instruction>(*UI++)) 00808 if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) { 00809 I->replaceAllUsesWith(NewC); 00810 00811 // Advance UI to the next non-I use to avoid invalidating it! 00812 // Instructions could multiply use V. 00813 while (UI != E && *UI == I) 00814 ++UI; 00815 I->eraseFromParent(); 00816 } 00817 } 00818 00819 /// OptimizeGlobalAddressOfMalloc - This function takes the specified global 00820 /// variable, and transforms the program as if it always contained the result of 00821 /// the specified malloc. Because it is always the result of the specified 00822 /// malloc, there is no reason to actually DO the malloc. Instead, turn the 00823 /// malloc into a global, and any loads of GV as uses of the new global. 00824 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, 00825 CallInst *CI, 00826 Type *AllocTy, 00827 ConstantInt *NElements, 00828 const DataLayout *DL, 00829 TargetLibraryInfo *TLI) { 00830 DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n'); 00831 00832 Type *GlobalType; 00833 if (NElements->getZExtValue() == 1) 00834 GlobalType = AllocTy; 00835 else 00836 // If we have an array allocation, the global variable is of an array. 00837 GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue()); 00838 00839 // Create the new global variable. The contents of the malloc'd memory is 00840 // undefined, so initialize with an undef value. 00841 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(), 00842 GlobalType, false, 00843 GlobalValue::InternalLinkage, 00844 UndefValue::get(GlobalType), 00845 GV->getName()+".body", 00846 GV, 00847 GV->getThreadLocalMode()); 00848 00849 // If there are bitcast users of the malloc (which is typical, usually we have 00850 // a malloc + bitcast) then replace them with uses of the new global. Update 00851 // other users to use the global as well. 00852 BitCastInst *TheBC = nullptr; 00853 while (!CI->use_empty()) { 00854 Instruction *User = cast<Instruction>(CI->user_back()); 00855 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { 00856 if (BCI->getType() == NewGV->getType()) { 00857 BCI->replaceAllUsesWith(NewGV); 00858 BCI->eraseFromParent(); 00859 } else { 00860 BCI->setOperand(0, NewGV); 00861 } 00862 } else { 00863 if (!TheBC) 00864 TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI); 00865 User->replaceUsesOfWith(CI, TheBC); 00866 } 00867 } 00868 00869 Constant *RepValue = NewGV; 00870 if (NewGV->getType() != GV->getType()->getElementType()) 00871 RepValue = ConstantExpr::getBitCast(RepValue, 00872 GV->getType()->getElementType()); 00873 00874 // If there is a comparison against null, we will insert a global bool to 00875 // keep track of whether the global was initialized yet or not. 00876 GlobalVariable *InitBool = 00877 new GlobalVariable(Type::getInt1Ty(GV->getContext()), false, 00878 GlobalValue::InternalLinkage, 00879 ConstantInt::getFalse(GV->getContext()), 00880 GV->getName()+".init", GV->getThreadLocalMode()); 00881 bool InitBoolUsed = false; 00882 00883 // Loop over all uses of GV, processing them in turn. 00884 while (!GV->use_empty()) { 00885 if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) { 00886 // The global is initialized when the store to it occurs. 00887 new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0, 00888 SI->getOrdering(), SI->getSynchScope(), SI); 00889 SI->eraseFromParent(); 00890 continue; 00891 } 00892 00893 LoadInst *LI = cast<LoadInst>(GV->user_back()); 00894 while (!LI->use_empty()) { 00895 Use &LoadUse = *LI->use_begin(); 00896 ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser()); 00897 if (!ICI) { 00898 LoadUse = RepValue; 00899 continue; 00900 } 00901 00902 // Replace the cmp X, 0 with a use of the bool value. 00903 // Sink the load to where the compare was, if atomic rules allow us to. 00904 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0, 00905 LI->getOrdering(), LI->getSynchScope(), 00906 LI->isUnordered() ? (Instruction*)ICI : LI); 00907 InitBoolUsed = true; 00908 switch (ICI->getPredicate()) { 00909 default: llvm_unreachable("Unknown ICmp Predicate!"); 00910 case ICmpInst::ICMP_ULT: 00911 case ICmpInst::ICMP_SLT: // X < null -> always false 00912 LV = ConstantInt::getFalse(GV->getContext()); 00913 break; 00914 case ICmpInst::ICMP_ULE: 00915 case ICmpInst::ICMP_SLE: 00916 case ICmpInst::ICMP_EQ: 00917 LV = BinaryOperator::CreateNot(LV, "notinit", ICI); 00918 break; 00919 case ICmpInst::ICMP_NE: 00920 case ICmpInst::ICMP_UGE: 00921 case ICmpInst::ICMP_SGE: 00922 case ICmpInst::ICMP_UGT: 00923 case ICmpInst::ICMP_SGT: 00924 break; // no change. 00925 } 00926 ICI->replaceAllUsesWith(LV); 00927 ICI->eraseFromParent(); 00928 } 00929 LI->eraseFromParent(); 00930 } 00931 00932 // If the initialization boolean was used, insert it, otherwise delete it. 00933 if (!InitBoolUsed) { 00934 while (!InitBool->use_empty()) // Delete initializations 00935 cast<StoreInst>(InitBool->user_back())->eraseFromParent(); 00936 delete InitBool; 00937 } else 00938 GV->getParent()->getGlobalList().insert(GV, InitBool); 00939 00940 // Now the GV is dead, nuke it and the malloc.. 00941 GV->eraseFromParent(); 00942 CI->eraseFromParent(); 00943 00944 // To further other optimizations, loop over all users of NewGV and try to 00945 // constant prop them. This will promote GEP instructions with constant 00946 // indices into GEP constant-exprs, which will allow global-opt to hack on it. 00947 ConstantPropUsersOf(NewGV, DL, TLI); 00948 if (RepValue != NewGV) 00949 ConstantPropUsersOf(RepValue, DL, TLI); 00950 00951 return NewGV; 00952 } 00953 00954 /// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking 00955 /// to make sure that there are no complex uses of V. We permit simple things 00956 /// like dereferencing the pointer, but not storing through the address, unless 00957 /// it is to the specified global. 00958 static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V, 00959 const GlobalVariable *GV, 00960 SmallPtrSetImpl<const PHINode*> &PHIs) { 00961 for (const User *U : V->users()) { 00962 const Instruction *Inst = cast<Instruction>(U); 00963 00964 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) { 00965 continue; // Fine, ignore. 00966 } 00967 00968 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 00969 if (SI->getOperand(0) == V && SI->getOperand(1) != GV) 00970 return false; // Storing the pointer itself... bad. 00971 continue; // Otherwise, storing through it, or storing into GV... fine. 00972 } 00973 00974 // Must index into the array and into the struct. 00975 if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) { 00976 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) 00977 return false; 00978 continue; 00979 } 00980 00981 if (const PHINode *PN = dyn_cast<PHINode>(Inst)) { 00982 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI 00983 // cycles. 00984 if (PHIs.insert(PN)) 00985 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs)) 00986 return false; 00987 continue; 00988 } 00989 00990 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) { 00991 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs)) 00992 return false; 00993 continue; 00994 } 00995 00996 return false; 00997 } 00998 return true; 00999 } 01000 01001 /// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV 01002 /// somewhere. Transform all uses of the allocation into loads from the 01003 /// global and uses of the resultant pointer. Further, delete the store into 01004 /// GV. This assumes that these value pass the 01005 /// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate. 01006 static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc, 01007 GlobalVariable *GV) { 01008 while (!Alloc->use_empty()) { 01009 Instruction *U = cast<Instruction>(*Alloc->user_begin()); 01010 Instruction *InsertPt = U; 01011 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 01012 // If this is the store of the allocation into the global, remove it. 01013 if (SI->getOperand(1) == GV) { 01014 SI->eraseFromParent(); 01015 continue; 01016 } 01017 } else if (PHINode *PN = dyn_cast<PHINode>(U)) { 01018 // Insert the load in the corresponding predecessor, not right before the 01019 // PHI. 01020 InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator(); 01021 } else if (isa<BitCastInst>(U)) { 01022 // Must be bitcast between the malloc and store to initialize the global. 01023 ReplaceUsesOfMallocWithGlobal(U, GV); 01024 U->eraseFromParent(); 01025 continue; 01026 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 01027 // If this is a "GEP bitcast" and the user is a store to the global, then 01028 // just process it as a bitcast. 01029 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse()) 01030 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back())) 01031 if (SI->getOperand(1) == GV) { 01032 // Must be bitcast GEP between the malloc and store to initialize 01033 // the global. 01034 ReplaceUsesOfMallocWithGlobal(GEPI, GV); 01035 GEPI->eraseFromParent(); 01036 continue; 01037 } 01038 } 01039 01040 // Insert a load from the global, and use it instead of the malloc. 01041 Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt); 01042 U->replaceUsesOfWith(Alloc, NL); 01043 } 01044 } 01045 01046 /// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi 01047 /// of a load) are simple enough to perform heap SRA on. This permits GEP's 01048 /// that index through the array and struct field, icmps of null, and PHIs. 01049 static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V, 01050 SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs, 01051 SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) { 01052 // We permit two users of the load: setcc comparing against the null 01053 // pointer, and a getelementptr of a specific form. 01054 for (const User *U : V->users()) { 01055 const Instruction *UI = cast<Instruction>(U); 01056 01057 // Comparison against null is ok. 01058 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) { 01059 if (!isa<ConstantPointerNull>(ICI->getOperand(1))) 01060 return false; 01061 continue; 01062 } 01063 01064 // getelementptr is also ok, but only a simple form. 01065 if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) { 01066 // Must index into the array and into the struct. 01067 if (GEPI->getNumOperands() < 3) 01068 return false; 01069 01070 // Otherwise the GEP is ok. 01071 continue; 01072 } 01073 01074 if (const PHINode *PN = dyn_cast<PHINode>(UI)) { 01075 if (!LoadUsingPHIsPerLoad.insert(PN)) 01076 // This means some phi nodes are dependent on each other. 01077 // Avoid infinite looping! 01078 return false; 01079 if (!LoadUsingPHIs.insert(PN)) 01080 // If we have already analyzed this PHI, then it is safe. 01081 continue; 01082 01083 // Make sure all uses of the PHI are simple enough to transform. 01084 if (!LoadUsesSimpleEnoughForHeapSRA(PN, 01085 LoadUsingPHIs, LoadUsingPHIsPerLoad)) 01086 return false; 01087 01088 continue; 01089 } 01090 01091 // Otherwise we don't know what this is, not ok. 01092 return false; 01093 } 01094 01095 return true; 01096 } 01097 01098 01099 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from 01100 /// GV are simple enough to perform HeapSRA, return true. 01101 static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV, 01102 Instruction *StoredVal) { 01103 SmallPtrSet<const PHINode*, 32> LoadUsingPHIs; 01104 SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad; 01105 for (const User *U : GV->users()) 01106 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { 01107 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs, 01108 LoadUsingPHIsPerLoad)) 01109 return false; 01110 LoadUsingPHIsPerLoad.clear(); 01111 } 01112 01113 // If we reach here, we know that all uses of the loads and transitive uses 01114 // (through PHI nodes) are simple enough to transform. However, we don't know 01115 // that all inputs the to the PHI nodes are in the same equivalence sets. 01116 // Check to verify that all operands of the PHIs are either PHIS that can be 01117 // transformed, loads from GV, or MI itself. 01118 for (const PHINode *PN : LoadUsingPHIs) { 01119 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) { 01120 Value *InVal = PN->getIncomingValue(op); 01121 01122 // PHI of the stored value itself is ok. 01123 if (InVal == StoredVal) continue; 01124 01125 if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) { 01126 // One of the PHIs in our set is (optimistically) ok. 01127 if (LoadUsingPHIs.count(InPN)) 01128 continue; 01129 return false; 01130 } 01131 01132 // Load from GV is ok. 01133 if (const LoadInst *LI = dyn_cast<LoadInst>(InVal)) 01134 if (LI->getOperand(0) == GV) 01135 continue; 01136 01137 // UNDEF? NULL? 01138 01139 // Anything else is rejected. 01140 return false; 01141 } 01142 } 01143 01144 return true; 01145 } 01146 01147 static Value *GetHeapSROAValue(Value *V, unsigned FieldNo, 01148 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 01149 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 01150 std::vector<Value*> &FieldVals = InsertedScalarizedValues[V]; 01151 01152 if (FieldNo >= FieldVals.size()) 01153 FieldVals.resize(FieldNo+1); 01154 01155 // If we already have this value, just reuse the previously scalarized 01156 // version. 01157 if (Value *FieldVal = FieldVals[FieldNo]) 01158 return FieldVal; 01159 01160 // Depending on what instruction this is, we have several cases. 01161 Value *Result; 01162 if (LoadInst *LI = dyn_cast<LoadInst>(V)) { 01163 // This is a scalarized version of the load from the global. Just create 01164 // a new Load of the scalarized global. 01165 Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo, 01166 InsertedScalarizedValues, 01167 PHIsToRewrite), 01168 LI->getName()+".f"+Twine(FieldNo), LI); 01169 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 01170 // PN's type is pointer to struct. Make a new PHI of pointer to struct 01171 // field. 01172 01173 PointerType *PTy = cast<PointerType>(PN->getType()); 01174 StructType *ST = cast<StructType>(PTy->getElementType()); 01175 01176 unsigned AS = PTy->getAddressSpace(); 01177 PHINode *NewPN = 01178 PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS), 01179 PN->getNumIncomingValues(), 01180 PN->getName()+".f"+Twine(FieldNo), PN); 01181 Result = NewPN; 01182 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo)); 01183 } else { 01184 llvm_unreachable("Unknown usable value"); 01185 } 01186 01187 return FieldVals[FieldNo] = Result; 01188 } 01189 01190 /// RewriteHeapSROALoadUser - Given a load instruction and a value derived from 01191 /// the load, rewrite the derived value to use the HeapSRoA'd load. 01192 static void RewriteHeapSROALoadUser(Instruction *LoadUser, 01193 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 01194 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 01195 // If this is a comparison against null, handle it. 01196 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { 01197 assert(isa<ConstantPointerNull>(SCI->getOperand(1))); 01198 // If we have a setcc of the loaded pointer, we can use a setcc of any 01199 // field. 01200 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, 01201 InsertedScalarizedValues, PHIsToRewrite); 01202 01203 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, 01204 Constant::getNullValue(NPtr->getType()), 01205 SCI->getName()); 01206 SCI->replaceAllUsesWith(New); 01207 SCI->eraseFromParent(); 01208 return; 01209 } 01210 01211 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' 01212 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { 01213 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) 01214 && "Unexpected GEPI!"); 01215 01216 // Load the pointer for this field. 01217 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 01218 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, 01219 InsertedScalarizedValues, PHIsToRewrite); 01220 01221 // Create the new GEP idx vector. 01222 SmallVector<Value*, 8> GEPIdx; 01223 GEPIdx.push_back(GEPI->getOperand(1)); 01224 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); 01225 01226 Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx, 01227 GEPI->getName(), GEPI); 01228 GEPI->replaceAllUsesWith(NGEPI); 01229 GEPI->eraseFromParent(); 01230 return; 01231 } 01232 01233 // Recursively transform the users of PHI nodes. This will lazily create the 01234 // PHIs that are needed for individual elements. Keep track of what PHIs we 01235 // see in InsertedScalarizedValues so that we don't get infinite loops (very 01236 // antisocial). If the PHI is already in InsertedScalarizedValues, it has 01237 // already been seen first by another load, so its uses have already been 01238 // processed. 01239 PHINode *PN = cast<PHINode>(LoadUser); 01240 if (!InsertedScalarizedValues.insert(std::make_pair(PN, 01241 std::vector<Value*>())).second) 01242 return; 01243 01244 // If this is the first time we've seen this PHI, recursively process all 01245 // users. 01246 for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) { 01247 Instruction *User = cast<Instruction>(*UI++); 01248 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 01249 } 01250 } 01251 01252 /// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr 01253 /// is a value loaded from the global. Eliminate all uses of Ptr, making them 01254 /// use FieldGlobals instead. All uses of loaded values satisfy 01255 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA. 01256 static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, 01257 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 01258 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 01259 for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) { 01260 Instruction *User = cast<Instruction>(*UI++); 01261 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 01262 } 01263 01264 if (Load->use_empty()) { 01265 Load->eraseFromParent(); 01266 InsertedScalarizedValues.erase(Load); 01267 } 01268 } 01269 01270 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break 01271 /// it up into multiple allocations of arrays of the fields. 01272 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, 01273 Value *NElems, const DataLayout *DL, 01274 const TargetLibraryInfo *TLI) { 01275 DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); 01276 Type *MAT = getMallocAllocatedType(CI, TLI); 01277 StructType *STy = cast<StructType>(MAT); 01278 01279 // There is guaranteed to be at least one use of the malloc (storing 01280 // it into GV). If there are other uses, change them to be uses of 01281 // the global to simplify later code. This also deletes the store 01282 // into GV. 01283 ReplaceUsesOfMallocWithGlobal(CI, GV); 01284 01285 // Okay, at this point, there are no users of the malloc. Insert N 01286 // new mallocs at the same place as CI, and N globals. 01287 std::vector<Value*> FieldGlobals; 01288 std::vector<Value*> FieldMallocs; 01289 01290 unsigned AS = GV->getType()->getPointerAddressSpace(); 01291 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ 01292 Type *FieldTy = STy->getElementType(FieldNo); 01293 PointerType *PFieldTy = PointerType::get(FieldTy, AS); 01294 01295 GlobalVariable *NGV = 01296 new GlobalVariable(*GV->getParent(), 01297 PFieldTy, false, GlobalValue::InternalLinkage, 01298 Constant::getNullValue(PFieldTy), 01299 GV->getName() + ".f" + Twine(FieldNo), GV, 01300 GV->getThreadLocalMode()); 01301 FieldGlobals.push_back(NGV); 01302 01303 unsigned TypeSize = DL->getTypeAllocSize(FieldTy); 01304 if (StructType *ST = dyn_cast<StructType>(FieldTy)) 01305 TypeSize = DL->getStructLayout(ST)->getSizeInBytes(); 01306 Type *IntPtrTy = DL->getIntPtrType(CI->getType()); 01307 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy, 01308 ConstantInt::get(IntPtrTy, TypeSize), 01309 NElems, nullptr, 01310 CI->getName() + ".f" + Twine(FieldNo)); 01311 FieldMallocs.push_back(NMI); 01312 new StoreInst(NMI, NGV, CI); 01313 } 01314 01315 // The tricky aspect of this transformation is handling the case when malloc 01316 // fails. In the original code, malloc failing would set the result pointer 01317 // of malloc to null. In this case, some mallocs could succeed and others 01318 // could fail. As such, we emit code that looks like this: 01319 // F0 = malloc(field0) 01320 // F1 = malloc(field1) 01321 // F2 = malloc(field2) 01322 // if (F0 == 0 || F1 == 0 || F2 == 0) { 01323 // if (F0) { free(F0); F0 = 0; } 01324 // if (F1) { free(F1); F1 = 0; } 01325 // if (F2) { free(F2); F2 = 0; } 01326 // } 01327 // The malloc can also fail if its argument is too large. 01328 Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0); 01329 Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0), 01330 ConstantZero, "isneg"); 01331 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { 01332 Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i], 01333 Constant::getNullValue(FieldMallocs[i]->getType()), 01334 "isnull"); 01335 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI); 01336 } 01337 01338 // Split the basic block at the old malloc. 01339 BasicBlock *OrigBB = CI->getParent(); 01340 BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont"); 01341 01342 // Create the block to check the first condition. Put all these blocks at the 01343 // end of the function as they are unlikely to be executed. 01344 BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(), 01345 "malloc_ret_null", 01346 OrigBB->getParent()); 01347 01348 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond 01349 // branch on RunningOr. 01350 OrigBB->getTerminator()->eraseFromParent(); 01351 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); 01352 01353 // Within the NullPtrBlock, we need to emit a comparison and branch for each 01354 // pointer, because some may be null while others are not. 01355 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 01356 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); 01357 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, 01358 Constant::getNullValue(GVVal->getType())); 01359 BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it", 01360 OrigBB->getParent()); 01361 BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next", 01362 OrigBB->getParent()); 01363 Instruction *BI = BranchInst::Create(FreeBlock, NextBlock, 01364 Cmp, NullPtrBlock); 01365 01366 // Fill in FreeBlock. 01367 CallInst::CreateFree(GVVal, BI); 01368 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i], 01369 FreeBlock); 01370 BranchInst::Create(NextBlock, FreeBlock); 01371 01372 NullPtrBlock = NextBlock; 01373 } 01374 01375 BranchInst::Create(ContBB, NullPtrBlock); 01376 01377 // CI is no longer needed, remove it. 01378 CI->eraseFromParent(); 01379 01380 /// InsertedScalarizedLoads - As we process loads, if we can't immediately 01381 /// update all uses of the load, keep track of what scalarized loads are 01382 /// inserted for a given load. 01383 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues; 01384 InsertedScalarizedValues[GV] = FieldGlobals; 01385 01386 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite; 01387 01388 // Okay, the malloc site is completely handled. All of the uses of GV are now 01389 // loads, and all uses of those loads are simple. Rewrite them to use loads 01390 // of the per-field globals instead. 01391 for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) { 01392 Instruction *User = cast<Instruction>(*UI++); 01393 01394 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 01395 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite); 01396 continue; 01397 } 01398 01399 // Must be a store of null. 01400 StoreInst *SI = cast<StoreInst>(User); 01401 assert(isa<ConstantPointerNull>(SI->getOperand(0)) && 01402 "Unexpected heap-sra user!"); 01403 01404 // Insert a store of null into each global. 01405 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 01406 PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); 01407 Constant *Null = Constant::getNullValue(PT->getElementType()); 01408 new StoreInst(Null, FieldGlobals[i], SI); 01409 } 01410 // Erase the original store. 01411 SI->eraseFromParent(); 01412 } 01413 01414 // While we have PHIs that are interesting to rewrite, do it. 01415 while (!PHIsToRewrite.empty()) { 01416 PHINode *PN = PHIsToRewrite.back().first; 01417 unsigned FieldNo = PHIsToRewrite.back().second; 01418 PHIsToRewrite.pop_back(); 01419 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); 01420 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); 01421 01422 // Add all the incoming values. This can materialize more phis. 01423 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 01424 Value *InVal = PN->getIncomingValue(i); 01425 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, 01426 PHIsToRewrite); 01427 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); 01428 } 01429 } 01430 01431 // Drop all inter-phi links and any loads that made it this far. 01432 for (DenseMap<Value*, std::vector<Value*> >::iterator 01433 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 01434 I != E; ++I) { 01435 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 01436 PN->dropAllReferences(); 01437 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 01438 LI->dropAllReferences(); 01439 } 01440 01441 // Delete all the phis and loads now that inter-references are dead. 01442 for (DenseMap<Value*, std::vector<Value*> >::iterator 01443 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 01444 I != E; ++I) { 01445 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 01446 PN->eraseFromParent(); 01447 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 01448 LI->eraseFromParent(); 01449 } 01450 01451 // The old global is now dead, remove it. 01452 GV->eraseFromParent(); 01453 01454 ++NumHeapSRA; 01455 return cast<GlobalVariable>(FieldGlobals[0]); 01456 } 01457 01458 /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a 01459 /// pointer global variable with a single value stored it that is a malloc or 01460 /// cast of malloc. 01461 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, 01462 CallInst *CI, 01463 Type *AllocTy, 01464 AtomicOrdering Ordering, 01465 Module::global_iterator &GVI, 01466 const DataLayout *DL, 01467 TargetLibraryInfo *TLI) { 01468 if (!DL) 01469 return false; 01470 01471 // If this is a malloc of an abstract type, don't touch it. 01472 if (!AllocTy->isSized()) 01473 return false; 01474 01475 // We can't optimize this global unless all uses of it are *known* to be 01476 // of the malloc value, not of the null initializer value (consider a use 01477 // that compares the global's value against zero to see if the malloc has 01478 // been reached). To do this, we check to see if all uses of the global 01479 // would trap if the global were null: this proves that they must all 01480 // happen after the malloc. 01481 if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) 01482 return false; 01483 01484 // We can't optimize this if the malloc itself is used in a complex way, 01485 // for example, being stored into multiple globals. This allows the 01486 // malloc to be stored into the specified global, loaded icmp'd, and 01487 // GEP'd. These are all things we could transform to using the global 01488 // for. 01489 SmallPtrSet<const PHINode*, 8> PHIs; 01490 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs)) 01491 return false; 01492 01493 // If we have a global that is only initialized with a fixed size malloc, 01494 // transform the program to use global memory instead of malloc'd memory. 01495 // This eliminates dynamic allocation, avoids an indirection accessing the 01496 // data, and exposes the resultant global to further GlobalOpt. 01497 // We cannot optimize the malloc if we cannot determine malloc array size. 01498 Value *NElems = getMallocArraySize(CI, DL, TLI, true); 01499 if (!NElems) 01500 return false; 01501 01502 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems)) 01503 // Restrict this transformation to only working on small allocations 01504 // (2048 bytes currently), as we don't want to introduce a 16M global or 01505 // something. 01506 if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) { 01507 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI); 01508 return true; 01509 } 01510 01511 // If the allocation is an array of structures, consider transforming this 01512 // into multiple malloc'd arrays, one for each field. This is basically 01513 // SRoA for malloc'd memory. 01514 01515 if (Ordering != NotAtomic) 01516 return false; 01517 01518 // If this is an allocation of a fixed size array of structs, analyze as a 01519 // variable size array. malloc [100 x struct],1 -> malloc struct, 100 01520 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) 01521 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) 01522 AllocTy = AT->getElementType(); 01523 01524 StructType *AllocSTy = dyn_cast<StructType>(AllocTy); 01525 if (!AllocSTy) 01526 return false; 01527 01528 // This the structure has an unreasonable number of fields, leave it 01529 // alone. 01530 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && 01531 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) { 01532 01533 // If this is a fixed size array, transform the Malloc to be an alloc of 01534 // structs. malloc [100 x struct],1 -> malloc struct, 100 01535 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) { 01536 Type *IntPtrTy = DL->getIntPtrType(CI->getType()); 01537 unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes(); 01538 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); 01539 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements()); 01540 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, 01541 AllocSize, NumElements, 01542 nullptr, CI->getName()); 01543 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI); 01544 CI->replaceAllUsesWith(Cast); 01545 CI->eraseFromParent(); 01546 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc)) 01547 CI = cast<CallInst>(BCI->getOperand(0)); 01548 else 01549 CI = cast<CallInst>(Malloc); 01550 } 01551 01552 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), 01553 DL, TLI); 01554 return true; 01555 } 01556 01557 return false; 01558 } 01559 01560 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge 01561 // that only one value (besides its initializer) is ever stored to the global. 01562 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, 01563 AtomicOrdering Ordering, 01564 Module::global_iterator &GVI, 01565 const DataLayout *DL, 01566 TargetLibraryInfo *TLI) { 01567 // Ignore no-op GEPs and bitcasts. 01568 StoredOnceVal = StoredOnceVal->stripPointerCasts(); 01569 01570 // If we are dealing with a pointer global that is initialized to null and 01571 // only has one (non-null) value stored into it, then we can optimize any 01572 // users of the loaded value (often calls and loads) that would trap if the 01573 // value was null. 01574 if (GV->getInitializer()->getType()->isPointerTy() && 01575 GV->getInitializer()->isNullValue()) { 01576 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { 01577 if (GV->getInitializer()->getType() != SOVC->getType()) 01578 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); 01579 01580 // Optimize away any trapping uses of the loaded value. 01581 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI)) 01582 return true; 01583 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { 01584 Type *MallocType = getMallocAllocatedType(CI, TLI); 01585 if (MallocType && 01586 TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, 01587 DL, TLI)) 01588 return true; 01589 } 01590 } 01591 01592 return false; 01593 } 01594 01595 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only 01596 /// two values ever stored into GV are its initializer and OtherVal. See if we 01597 /// can shrink the global into a boolean and select between the two values 01598 /// whenever it is used. This exposes the values to other scalar optimizations. 01599 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { 01600 Type *GVElType = GV->getType()->getElementType(); 01601 01602 // If GVElType is already i1, it is already shrunk. If the type of the GV is 01603 // an FP value, pointer or vector, don't do this optimization because a select 01604 // between them is very expensive and unlikely to lead to later 01605 // simplification. In these cases, we typically end up with "cond ? v1 : v2" 01606 // where v1 and v2 both require constant pool loads, a big loss. 01607 if (GVElType == Type::getInt1Ty(GV->getContext()) || 01608 GVElType->isFloatingPointTy() || 01609 GVElType->isPointerTy() || GVElType->isVectorTy()) 01610 return false; 01611 01612 // Walk the use list of the global seeing if all the uses are load or store. 01613 // If there is anything else, bail out. 01614 for (User *U : GV->users()) 01615 if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) 01616 return false; 01617 01618 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); 01619 01620 // Create the new global, initializing it to false. 01621 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), 01622 false, 01623 GlobalValue::InternalLinkage, 01624 ConstantInt::getFalse(GV->getContext()), 01625 GV->getName()+".b", 01626 GV->getThreadLocalMode(), 01627 GV->getType()->getAddressSpace()); 01628 GV->getParent()->getGlobalList().insert(GV, NewGV); 01629 01630 Constant *InitVal = GV->getInitializer(); 01631 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && 01632 "No reason to shrink to bool!"); 01633 01634 // If initialized to zero and storing one into the global, we can use a cast 01635 // instead of a select to synthesize the desired value. 01636 bool IsOneZero = false; 01637 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) 01638 IsOneZero = InitVal->isNullValue() && CI->isOne(); 01639 01640 while (!GV->use_empty()) { 01641 Instruction *UI = cast<Instruction>(GV->user_back()); 01642 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 01643 // Change the store into a boolean store. 01644 bool StoringOther = SI->getOperand(0) == OtherVal; 01645 // Only do this if we weren't storing a loaded value. 01646 Value *StoreVal; 01647 if (StoringOther || SI->getOperand(0) == InitVal) { 01648 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), 01649 StoringOther); 01650 } else { 01651 // Otherwise, we are storing a previously loaded copy. To do this, 01652 // change the copy from copying the original value to just copying the 01653 // bool. 01654 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); 01655 01656 // If we've already replaced the input, StoredVal will be a cast or 01657 // select instruction. If not, it will be a load of the original 01658 // global. 01659 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { 01660 assert(LI->getOperand(0) == GV && "Not a copy!"); 01661 // Insert a new load, to preserve the saved value. 01662 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, 01663 LI->getOrdering(), LI->getSynchScope(), LI); 01664 } else { 01665 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && 01666 "This is not a form that we understand!"); 01667 StoreVal = StoredVal->getOperand(0); 01668 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); 01669 } 01670 } 01671 new StoreInst(StoreVal, NewGV, false, 0, 01672 SI->getOrdering(), SI->getSynchScope(), SI); 01673 } else { 01674 // Change the load into a load of bool then a select. 01675 LoadInst *LI = cast<LoadInst>(UI); 01676 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0, 01677 LI->getOrdering(), LI->getSynchScope(), LI); 01678 Value *NSI; 01679 if (IsOneZero) 01680 NSI = new ZExtInst(NLI, LI->getType(), "", LI); 01681 else 01682 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); 01683 NSI->takeName(LI); 01684 LI->replaceAllUsesWith(NSI); 01685 } 01686 UI->eraseFromParent(); 01687 } 01688 01689 // Retain the name of the old global variable. People who are debugging their 01690 // programs may expect these variables to be named the same. 01691 NewGV->takeName(GV); 01692 GV->eraseFromParent(); 01693 return true; 01694 } 01695 01696 01697 /// ProcessGlobal - Analyze the specified global variable and optimize it if 01698 /// possible. If we make a change, return true. 01699 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV, 01700 Module::global_iterator &GVI) { 01701 // Do more involved optimizations if the global is internal. 01702 GV->removeDeadConstantUsers(); 01703 01704 if (GV->use_empty()) { 01705 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV); 01706 GV->eraseFromParent(); 01707 ++NumDeleted; 01708 return true; 01709 } 01710 01711 if (!GV->hasLocalLinkage()) 01712 return false; 01713 01714 GlobalStatus GS; 01715 01716 if (GlobalStatus::analyzeGlobal(GV, GS)) 01717 return false; 01718 01719 if (!GS.IsCompared && !GV->hasUnnamedAddr()) { 01720 GV->setUnnamedAddr(true); 01721 NumUnnamed++; 01722 } 01723 01724 if (GV->isConstant() || !GV->hasInitializer()) 01725 return false; 01726 01727 return ProcessInternalGlobal(GV, GVI, GS); 01728 } 01729 01730 /// ProcessInternalGlobal - Analyze the specified global variable and optimize 01731 /// it if possible. If we make a change, return true. 01732 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, 01733 Module::global_iterator &GVI, 01734 const GlobalStatus &GS) { 01735 // If this is a first class global and has only one accessing function 01736 // and this function is main (which we know is not recursive), we replace 01737 // the global with a local alloca in this function. 01738 // 01739 // NOTE: It doesn't make sense to promote non-single-value types since we 01740 // are just replacing static memory to stack memory. 01741 // 01742 // If the global is in different address space, don't bring it to stack. 01743 if (!GS.HasMultipleAccessingFunctions && 01744 GS.AccessingFunction && !GS.HasNonInstructionUser && 01745 GV->getType()->getElementType()->isSingleValueType() && 01746 GS.AccessingFunction->getName() == "main" && 01747 GS.AccessingFunction->hasExternalLinkage() && 01748 GV->getType()->getAddressSpace() == 0) { 01749 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV); 01750 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction 01751 ->getEntryBlock().begin()); 01752 Type *ElemTy = GV->getType()->getElementType(); 01753 // FIXME: Pass Global's alignment when globals have alignment 01754 AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr, 01755 GV->getName(), &FirstI); 01756 if (!isa<UndefValue>(GV->getInitializer())) 01757 new StoreInst(GV->getInitializer(), Alloca, &FirstI); 01758 01759 GV->replaceAllUsesWith(Alloca); 01760 GV->eraseFromParent(); 01761 ++NumLocalized; 01762 return true; 01763 } 01764 01765 // If the global is never loaded (but may be stored to), it is dead. 01766 // Delete it now. 01767 if (!GS.IsLoaded) { 01768 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV); 01769 01770 bool Changed; 01771 if (isLeakCheckerRoot(GV)) { 01772 // Delete any constant stores to the global. 01773 Changed = CleanupPointerRootUsers(GV, TLI); 01774 } else { 01775 // Delete any stores we can find to the global. We may not be able to 01776 // make it completely dead though. 01777 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 01778 } 01779 01780 // If the global is dead now, delete it. 01781 if (GV->use_empty()) { 01782 GV->eraseFromParent(); 01783 ++NumDeleted; 01784 Changed = true; 01785 } 01786 return Changed; 01787 01788 } else if (GS.StoredType <= GlobalStatus::InitializerStored) { 01789 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); 01790 GV->setConstant(true); 01791 01792 // Clean up any obviously simplifiable users now. 01793 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 01794 01795 // If the global is dead now, just nuke it. 01796 if (GV->use_empty()) { 01797 DEBUG(dbgs() << " *** Marking constant allowed us to simplify " 01798 << "all users and delete global!\n"); 01799 GV->eraseFromParent(); 01800 ++NumDeleted; 01801 } 01802 01803 ++NumMarked; 01804 return true; 01805 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { 01806 if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) { 01807 const DataLayout &DL = DLP->getDataLayout(); 01808 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) { 01809 GVI = FirstNewGV; // Don't skip the newly produced globals! 01810 return true; 01811 } 01812 } 01813 } else if (GS.StoredType == GlobalStatus::StoredOnce) { 01814 // If the initial value for the global was an undef value, and if only 01815 // one other value was stored into it, we can just change the 01816 // initializer to be the stored value, then delete all stores to the 01817 // global. This allows us to mark it constant. 01818 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 01819 if (isa<UndefValue>(GV->getInitializer())) { 01820 // Change the initial value here. 01821 GV->setInitializer(SOVConstant); 01822 01823 // Clean up any obviously simplifiable users now. 01824 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 01825 01826 if (GV->use_empty()) { 01827 DEBUG(dbgs() << " *** Substituting initializer allowed us to " 01828 << "simplify all users and delete global!\n"); 01829 GV->eraseFromParent(); 01830 ++NumDeleted; 01831 } else { 01832 GVI = GV; 01833 } 01834 ++NumSubstitute; 01835 return true; 01836 } 01837 01838 // Try to optimize globals based on the knowledge that only one value 01839 // (besides its initializer) is ever stored to the global. 01840 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI, 01841 DL, TLI)) 01842 return true; 01843 01844 // Otherwise, if the global was not a boolean, we can shrink it to be a 01845 // boolean. 01846 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { 01847 if (GS.Ordering == NotAtomic) { 01848 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 01849 ++NumShrunkToBool; 01850 return true; 01851 } 01852 } 01853 } 01854 } 01855 01856 return false; 01857 } 01858 01859 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified 01860 /// function, changing them to FastCC. 01861 static void ChangeCalleesToFastCall(Function *F) { 01862 for (User *U : F->users()) { 01863 if (isa<BlockAddress>(U)) 01864 continue; 01865 CallSite CS(cast<Instruction>(U)); 01866 CS.setCallingConv(CallingConv::Fast); 01867 } 01868 } 01869 01870 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) { 01871 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 01872 unsigned Index = Attrs.getSlotIndex(i); 01873 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest)) 01874 continue; 01875 01876 // There can be only one. 01877 return Attrs.removeAttribute(C, Index, Attribute::Nest); 01878 } 01879 01880 return Attrs; 01881 } 01882 01883 static void RemoveNestAttribute(Function *F) { 01884 F->setAttributes(StripNest(F->getContext(), F->getAttributes())); 01885 for (User *U : F->users()) { 01886 if (isa<BlockAddress>(U)) 01887 continue; 01888 CallSite CS(cast<Instruction>(U)); 01889 CS.setAttributes(StripNest(F->getContext(), CS.getAttributes())); 01890 } 01891 } 01892 01893 /// Return true if this is a calling convention that we'd like to change. The 01894 /// idea here is that we don't want to mess with the convention if the user 01895 /// explicitly requested something with performance implications like coldcc, 01896 /// GHC, or anyregcc. 01897 static bool isProfitableToMakeFastCC(Function *F) { 01898 CallingConv::ID CC = F->getCallingConv(); 01899 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc? 01900 return CC == CallingConv::C || CC == CallingConv::X86_ThisCall; 01901 } 01902 01903 bool GlobalOpt::OptimizeFunctions(Module &M) { 01904 bool Changed = false; 01905 // Optimize functions. 01906 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 01907 Function *F = FI++; 01908 // Functions without names cannot be referenced outside this module. 01909 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage()) 01910 F->setLinkage(GlobalValue::InternalLinkage); 01911 F->removeDeadConstantUsers(); 01912 if (F->isDefTriviallyDead()) { 01913 F->eraseFromParent(); 01914 Changed = true; 01915 ++NumFnDeleted; 01916 } else if (F->hasLocalLinkage()) { 01917 if (isProfitableToMakeFastCC(F) && !F->isVarArg() && 01918 !F->hasAddressTaken()) { 01919 // If this function has a calling convention worth changing, is not a 01920 // varargs function, and is only called directly, promote it to use the 01921 // Fast calling convention. 01922 F->setCallingConv(CallingConv::Fast); 01923 ChangeCalleesToFastCall(F); 01924 ++NumFastCallFns; 01925 Changed = true; 01926 } 01927 01928 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 01929 !F->hasAddressTaken()) { 01930 // The function is not used by a trampoline intrinsic, so it is safe 01931 // to remove the 'nest' attribute. 01932 RemoveNestAttribute(F); 01933 ++NumNestRemoved; 01934 Changed = true; 01935 } 01936 } 01937 } 01938 return Changed; 01939 } 01940 01941 bool GlobalOpt::OptimizeGlobalVars(Module &M) { 01942 bool Changed = false; 01943 01944 SmallSet<const Comdat *, 8> NotDiscardableComdats; 01945 for (const GlobalVariable &GV : M.globals()) 01946 if (const Comdat *C = GV.getComdat()) 01947 if (!GV.isDiscardableIfUnused()) 01948 NotDiscardableComdats.insert(C); 01949 01950 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 01951 GVI != E; ) { 01952 GlobalVariable *GV = GVI++; 01953 // Global variables without names cannot be referenced outside this module. 01954 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage()) 01955 GV->setLinkage(GlobalValue::InternalLinkage); 01956 // Simplify the initializer. 01957 if (GV->hasInitializer()) 01958 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { 01959 Constant *New = ConstantFoldConstantExpression(CE, DL, TLI); 01960 if (New && New != CE) 01961 GV->setInitializer(New); 01962 } 01963 01964 if (GV->isDiscardableIfUnused()) { 01965 if (const Comdat *C = GV->getComdat()) 01966 if (NotDiscardableComdats.count(C)) 01967 continue; 01968 Changed |= ProcessGlobal(GV, GVI); 01969 } 01970 } 01971 return Changed; 01972 } 01973 01974 static inline bool 01975 isSimpleEnoughValueToCommit(Constant *C, 01976 SmallPtrSetImpl<Constant*> &SimpleConstants, 01977 const DataLayout *DL); 01978 01979 01980 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be 01981 /// handled by the code generator. We don't want to generate something like: 01982 /// void *X = &X/42; 01983 /// because the code generator doesn't have a relocation that can handle that. 01984 /// 01985 /// This function should be called if C was not found (but just got inserted) 01986 /// in SimpleConstants to avoid having to rescan the same constants all the 01987 /// time. 01988 static bool isSimpleEnoughValueToCommitHelper(Constant *C, 01989 SmallPtrSetImpl<Constant*> &SimpleConstants, 01990 const DataLayout *DL) { 01991 // Simple global addresses are supported, do not allow dllimport or 01992 // thread-local globals. 01993 if (auto *GV = dyn_cast<GlobalValue>(C)) 01994 return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal(); 01995 01996 // Simple integer, undef, constant aggregate zero, etc are all supported. 01997 if (C->getNumOperands() == 0 || isa<BlockAddress>(C)) 01998 return true; 01999 02000 // Aggregate values are safe if all their elements are. 02001 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || 02002 isa<ConstantVector>(C)) { 02003 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { 02004 Constant *Op = cast<Constant>(C->getOperand(i)); 02005 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL)) 02006 return false; 02007 } 02008 return true; 02009 } 02010 02011 // We don't know exactly what relocations are allowed in constant expressions, 02012 // so we allow &global+constantoffset, which is safe and uniformly supported 02013 // across targets. 02014 ConstantExpr *CE = cast<ConstantExpr>(C); 02015 switch (CE->getOpcode()) { 02016 case Instruction::BitCast: 02017 // Bitcast is fine if the casted value is fine. 02018 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 02019 02020 case Instruction::IntToPtr: 02021 case Instruction::PtrToInt: 02022 // int <=> ptr is fine if the int type is the same size as the 02023 // pointer type. 02024 if (!DL || DL->getTypeSizeInBits(CE->getType()) != 02025 DL->getTypeSizeInBits(CE->getOperand(0)->getType())) 02026 return false; 02027 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 02028 02029 // GEP is fine if it is simple + constant offset. 02030 case Instruction::GetElementPtr: 02031 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) 02032 if (!isa<ConstantInt>(CE->getOperand(i))) 02033 return false; 02034 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 02035 02036 case Instruction::Add: 02037 // We allow simple+cst. 02038 if (!isa<ConstantInt>(CE->getOperand(1))) 02039 return false; 02040 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 02041 } 02042 return false; 02043 } 02044 02045 static inline bool 02046 isSimpleEnoughValueToCommit(Constant *C, 02047 SmallPtrSetImpl<Constant*> &SimpleConstants, 02048 const DataLayout *DL) { 02049 // If we already checked this constant, we win. 02050 if (!SimpleConstants.insert(C)) return true; 02051 // Check the constant. 02052 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL); 02053 } 02054 02055 02056 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple 02057 /// enough for us to understand. In particular, if it is a cast to anything 02058 /// other than from one pointer type to another pointer type, we punt. 02059 /// We basically just support direct accesses to globals and GEP's of 02060 /// globals. This should be kept up to date with CommitValueTo. 02061 static bool isSimpleEnoughPointerToCommit(Constant *C) { 02062 // Conservatively, avoid aggregate types. This is because we don't 02063 // want to worry about them partially overlapping other stores. 02064 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) 02065 return false; 02066 02067 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 02068 // Do not allow weak/*_odr/linkonce linkage or external globals. 02069 return GV->hasUniqueInitializer(); 02070 02071 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 02072 // Handle a constantexpr gep. 02073 if (CE->getOpcode() == Instruction::GetElementPtr && 02074 isa<GlobalVariable>(CE->getOperand(0)) && 02075 cast<GEPOperator>(CE)->isInBounds()) { 02076 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 02077 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 02078 // external globals. 02079 if (!GV->hasUniqueInitializer()) 02080 return false; 02081 02082 // The first index must be zero. 02083 ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin())); 02084 if (!CI || !CI->isZero()) return false; 02085 02086 // The remaining indices must be compile-time known integers within the 02087 // notional bounds of the corresponding static array types. 02088 if (!CE->isGEPWithNoNotionalOverIndexing()) 02089 return false; 02090 02091 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 02092 02093 // A constantexpr bitcast from a pointer to another pointer is a no-op, 02094 // and we know how to evaluate it by moving the bitcast from the pointer 02095 // operand to the value operand. 02096 } else if (CE->getOpcode() == Instruction::BitCast && 02097 isa<GlobalVariable>(CE->getOperand(0))) { 02098 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 02099 // external globals. 02100 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); 02101 } 02102 } 02103 02104 return false; 02105 } 02106 02107 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global 02108 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it. 02109 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. 02110 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 02111 ConstantExpr *Addr, unsigned OpNo) { 02112 // Base case of the recursion. 02113 if (OpNo == Addr->getNumOperands()) { 02114 assert(Val->getType() == Init->getType() && "Type mismatch!"); 02115 return Val; 02116 } 02117 02118 SmallVector<Constant*, 32> Elts; 02119 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { 02120 // Break up the constant into its elements. 02121 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 02122 Elts.push_back(Init->getAggregateElement(i)); 02123 02124 // Replace the element that we are supposed to. 02125 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 02126 unsigned Idx = CU->getZExtValue(); 02127 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 02128 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 02129 02130 // Return the modified struct. 02131 return ConstantStruct::get(STy, Elts); 02132 } 02133 02134 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 02135 SequentialType *InitTy = cast<SequentialType>(Init->getType()); 02136 02137 uint64_t NumElts; 02138 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) 02139 NumElts = ATy->getNumElements(); 02140 else 02141 NumElts = InitTy->getVectorNumElements(); 02142 02143 // Break up the array into elements. 02144 for (uint64_t i = 0, e = NumElts; i != e; ++i) 02145 Elts.push_back(Init->getAggregateElement(i)); 02146 02147 assert(CI->getZExtValue() < NumElts); 02148 Elts[CI->getZExtValue()] = 02149 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); 02150 02151 if (Init->getType()->isArrayTy()) 02152 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); 02153 return ConstantVector::get(Elts); 02154 } 02155 02156 /// CommitValueTo - We have decided that Addr (which satisfies the predicate 02157 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 02158 static void CommitValueTo(Constant *Val, Constant *Addr) { 02159 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 02160 assert(GV->hasInitializer()); 02161 GV->setInitializer(Val); 02162 return; 02163 } 02164 02165 ConstantExpr *CE = cast<ConstantExpr>(Addr); 02166 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 02167 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); 02168 } 02169 02170 namespace { 02171 02172 /// Evaluator - This class evaluates LLVM IR, producing the Constant 02173 /// representing each SSA instruction. Changes to global variables are stored 02174 /// in a mapping that can be iterated over after the evaluation is complete. 02175 /// Once an evaluation call fails, the evaluation object should not be reused. 02176 class Evaluator { 02177 public: 02178 Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI) 02179 : DL(DL), TLI(TLI) { 02180 ValueStack.emplace_back(); 02181 } 02182 02183 ~Evaluator() { 02184 for (auto &Tmp : AllocaTmps) 02185 // If there are still users of the alloca, the program is doing something 02186 // silly, e.g. storing the address of the alloca somewhere and using it 02187 // later. Since this is undefined, we'll just make it be null. 02188 if (!Tmp->use_empty()) 02189 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType())); 02190 } 02191 02192 /// EvaluateFunction - Evaluate a call to function F, returning true if 02193 /// successful, false if we can't evaluate it. ActualArgs contains the formal 02194 /// arguments for the function. 02195 bool EvaluateFunction(Function *F, Constant *&RetVal, 02196 const SmallVectorImpl<Constant*> &ActualArgs); 02197 02198 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if 02199 /// successful, false if we can't evaluate it. NewBB returns the next BB that 02200 /// control flows into, or null upon return. 02201 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB); 02202 02203 Constant *getVal(Value *V) { 02204 if (Constant *CV = dyn_cast<Constant>(V)) return CV; 02205 Constant *R = ValueStack.back().lookup(V); 02206 assert(R && "Reference to an uncomputed value!"); 02207 return R; 02208 } 02209 02210 void setVal(Value *V, Constant *C) { 02211 ValueStack.back()[V] = C; 02212 } 02213 02214 const DenseMap<Constant*, Constant*> &getMutatedMemory() const { 02215 return MutatedMemory; 02216 } 02217 02218 const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const { 02219 return Invariants; 02220 } 02221 02222 private: 02223 Constant *ComputeLoadResult(Constant *P); 02224 02225 /// ValueStack - As we compute SSA register values, we store their contents 02226 /// here. The back of the deque contains the current function and the stack 02227 /// contains the values in the calling frames. 02228 std::deque<DenseMap<Value*, Constant*>> ValueStack; 02229 02230 /// CallStack - This is used to detect recursion. In pathological situations 02231 /// we could hit exponential behavior, but at least there is nothing 02232 /// unbounded. 02233 SmallVector<Function*, 4> CallStack; 02234 02235 /// MutatedMemory - For each store we execute, we update this map. Loads 02236 /// check this to get the most up-to-date value. If evaluation is successful, 02237 /// this state is committed to the process. 02238 DenseMap<Constant*, Constant*> MutatedMemory; 02239 02240 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable 02241 /// to represent its body. This vector is needed so we can delete the 02242 /// temporary globals when we are done. 02243 SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps; 02244 02245 /// Invariants - These global variables have been marked invariant by the 02246 /// static constructor. 02247 SmallPtrSet<GlobalVariable*, 8> Invariants; 02248 02249 /// SimpleConstants - These are constants we have checked and know to be 02250 /// simple enough to live in a static initializer of a global. 02251 SmallPtrSet<Constant*, 8> SimpleConstants; 02252 02253 const DataLayout *DL; 02254 const TargetLibraryInfo *TLI; 02255 }; 02256 02257 } // anonymous namespace 02258 02259 /// ComputeLoadResult - Return the value that would be computed by a load from 02260 /// P after the stores reflected by 'memory' have been performed. If we can't 02261 /// decide, return null. 02262 Constant *Evaluator::ComputeLoadResult(Constant *P) { 02263 // If this memory location has been recently stored, use the stored value: it 02264 // is the most up-to-date. 02265 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P); 02266 if (I != MutatedMemory.end()) return I->second; 02267 02268 // Access it. 02269 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { 02270 if (GV->hasDefinitiveInitializer()) 02271 return GV->getInitializer(); 02272 return nullptr; 02273 } 02274 02275 // Handle a constantexpr getelementptr. 02276 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) 02277 if (CE->getOpcode() == Instruction::GetElementPtr && 02278 isa<GlobalVariable>(CE->getOperand(0))) { 02279 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 02280 if (GV->hasDefinitiveInitializer()) 02281 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 02282 } 02283 02284 return nullptr; // don't know how to evaluate. 02285 } 02286 02287 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if 02288 /// successful, false if we can't evaluate it. NewBB returns the next BB that 02289 /// control flows into, or null upon return. 02290 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, 02291 BasicBlock *&NextBB) { 02292 // This is the main evaluation loop. 02293 while (1) { 02294 Constant *InstResult = nullptr; 02295 02296 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); 02297 02298 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { 02299 if (!SI->isSimple()) { 02300 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); 02301 return false; // no volatile/atomic accesses. 02302 } 02303 Constant *Ptr = getVal(SI->getOperand(1)); 02304 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 02305 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); 02306 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 02307 DEBUG(dbgs() << "; To: " << *Ptr << "\n"); 02308 } 02309 if (!isSimpleEnoughPointerToCommit(Ptr)) { 02310 // If this is too complex for us to commit, reject it. 02311 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store."); 02312 return false; 02313 } 02314 02315 Constant *Val = getVal(SI->getOperand(0)); 02316 02317 // If this might be too difficult for the backend to handle (e.g. the addr 02318 // of one global variable divided by another) then we can't commit it. 02319 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) { 02320 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val 02321 << "\n"); 02322 return false; 02323 } 02324 02325 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 02326 if (CE->getOpcode() == Instruction::BitCast) { 02327 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n"); 02328 // If we're evaluating a store through a bitcast, then we need 02329 // to pull the bitcast off the pointer type and push it onto the 02330 // stored value. 02331 Ptr = CE->getOperand(0); 02332 02333 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType(); 02334 02335 // In order to push the bitcast onto the stored value, a bitcast 02336 // from NewTy to Val's type must be legal. If it's not, we can try 02337 // introspecting NewTy to find a legal conversion. 02338 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { 02339 // If NewTy is a struct, we can convert the pointer to the struct 02340 // into a pointer to its first member. 02341 // FIXME: This could be extended to support arrays as well. 02342 if (StructType *STy = dyn_cast<StructType>(NewTy)) { 02343 NewTy = STy->getTypeAtIndex(0U); 02344 02345 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32); 02346 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); 02347 Constant * const IdxList[] = {IdxZero, IdxZero}; 02348 02349 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList); 02350 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) 02351 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 02352 02353 // If we can't improve the situation by introspecting NewTy, 02354 // we have to give up. 02355 } else { 02356 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " 02357 "evaluate.\n"); 02358 return false; 02359 } 02360 } 02361 02362 // If we found compatible types, go ahead and push the bitcast 02363 // onto the stored value. 02364 Val = ConstantExpr::getBitCast(Val, NewTy); 02365 02366 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); 02367 } 02368 } 02369 02370 MutatedMemory[Ptr] = Val; 02371 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { 02372 InstResult = ConstantExpr::get(BO->getOpcode(), 02373 getVal(BO->getOperand(0)), 02374 getVal(BO->getOperand(1))); 02375 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult 02376 << "\n"); 02377 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { 02378 InstResult = ConstantExpr::getCompare(CI->getPredicate(), 02379 getVal(CI->getOperand(0)), 02380 getVal(CI->getOperand(1))); 02381 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult 02382 << "\n"); 02383 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { 02384 InstResult = ConstantExpr::getCast(CI->getOpcode(), 02385 getVal(CI->getOperand(0)), 02386 CI->getType()); 02387 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult 02388 << "\n"); 02389 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { 02390 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)), 02391 getVal(SI->getOperand(1)), 02392 getVal(SI->getOperand(2))); 02393 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult 02394 << "\n"); 02395 } else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) { 02396 InstResult = ConstantExpr::getExtractValue( 02397 getVal(EVI->getAggregateOperand()), EVI->getIndices()); 02398 DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: " << *InstResult 02399 << "\n"); 02400 } else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) { 02401 InstResult = ConstantExpr::getInsertValue( 02402 getVal(IVI->getAggregateOperand()), 02403 getVal(IVI->getInsertedValueOperand()), IVI->getIndices()); 02404 DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " << *InstResult 02405 << "\n"); 02406 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { 02407 Constant *P = getVal(GEP->getOperand(0)); 02408 SmallVector<Constant*, 8> GEPOps; 02409 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); 02410 i != e; ++i) 02411 GEPOps.push_back(getVal(*i)); 02412 InstResult = 02413 ConstantExpr::getGetElementPtr(P, GEPOps, 02414 cast<GEPOperator>(GEP)->isInBounds()); 02415 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult 02416 << "\n"); 02417 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { 02418 02419 if (!LI->isSimple()) { 02420 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); 02421 return false; // no volatile/atomic accesses. 02422 } 02423 02424 Constant *Ptr = getVal(LI->getOperand(0)); 02425 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 02426 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 02427 DEBUG(dbgs() << "Found a constant pointer expression, constant " 02428 "folding: " << *Ptr << "\n"); 02429 } 02430 InstResult = ComputeLoadResult(Ptr); 02431 if (!InstResult) { 02432 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load." 02433 "\n"); 02434 return false; // Could not evaluate load. 02435 } 02436 02437 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); 02438 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { 02439 if (AI->isArrayAllocation()) { 02440 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); 02441 return false; // Cannot handle array allocs. 02442 } 02443 Type *Ty = AI->getType()->getElementType(); 02444 AllocaTmps.push_back( 02445 make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage, 02446 UndefValue::get(Ty), AI->getName())); 02447 InstResult = AllocaTmps.back().get(); 02448 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); 02449 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) { 02450 CallSite CS(CurInst); 02451 02452 // Debug info can safely be ignored here. 02453 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) { 02454 DEBUG(dbgs() << "Ignoring debug info.\n"); 02455 ++CurInst; 02456 continue; 02457 } 02458 02459 // Cannot handle inline asm. 02460 if (isa<InlineAsm>(CS.getCalledValue())) { 02461 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); 02462 return false; 02463 } 02464 02465 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { 02466 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) { 02467 if (MSI->isVolatile()) { 02468 DEBUG(dbgs() << "Can not optimize a volatile memset " << 02469 "intrinsic.\n"); 02470 return false; 02471 } 02472 Constant *Ptr = getVal(MSI->getDest()); 02473 Constant *Val = getVal(MSI->getValue()); 02474 Constant *DestVal = ComputeLoadResult(getVal(Ptr)); 02475 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { 02476 // This memset is a no-op. 02477 DEBUG(dbgs() << "Ignoring no-op memset.\n"); 02478 ++CurInst; 02479 continue; 02480 } 02481 } 02482 02483 if (II->getIntrinsicID() == Intrinsic::lifetime_start || 02484 II->getIntrinsicID() == Intrinsic::lifetime_end) { 02485 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); 02486 ++CurInst; 02487 continue; 02488 } 02489 02490 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 02491 // We don't insert an entry into Values, as it doesn't have a 02492 // meaningful return value. 02493 if (!II->use_empty()) { 02494 DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n"); 02495 return false; 02496 } 02497 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0)); 02498 Value *PtrArg = getVal(II->getArgOperand(1)); 02499 Value *Ptr = PtrArg->stripPointerCasts(); 02500 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { 02501 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType(); 02502 if (DL && !Size->isAllOnesValue() && 02503 Size->getValue().getLimitedValue() >= 02504 DL->getTypeStoreSize(ElemTy)) { 02505 Invariants.insert(GV); 02506 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV 02507 << "\n"); 02508 } else { 02509 DEBUG(dbgs() << "Found a global var, but can not treat it as an " 02510 "invariant.\n"); 02511 } 02512 } 02513 // Continue even if we do nothing. 02514 ++CurInst; 02515 continue; 02516 } 02517 02518 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); 02519 return false; 02520 } 02521 02522 // Resolve function pointers. 02523 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue())); 02524 if (!Callee || Callee->mayBeOverridden()) { 02525 DEBUG(dbgs() << "Can not resolve function pointer.\n"); 02526 return false; // Cannot resolve. 02527 } 02528 02529 SmallVector<Constant*, 8> Formals; 02530 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) 02531 Formals.push_back(getVal(*i)); 02532 02533 if (Callee->isDeclaration()) { 02534 // If this is a function we can constant fold, do it. 02535 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) { 02536 InstResult = C; 02537 DEBUG(dbgs() << "Constant folded function call. Result: " << 02538 *InstResult << "\n"); 02539 } else { 02540 DEBUG(dbgs() << "Can not constant fold function call.\n"); 02541 return false; 02542 } 02543 } else { 02544 if (Callee->getFunctionType()->isVarArg()) { 02545 DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); 02546 return false; 02547 } 02548 02549 Constant *RetVal = nullptr; 02550 // Execute the call, if successful, use the return value. 02551 ValueStack.emplace_back(); 02552 if (!EvaluateFunction(Callee, RetVal, Formals)) { 02553 DEBUG(dbgs() << "Failed to evaluate function.\n"); 02554 return false; 02555 } 02556 ValueStack.pop_back(); 02557 InstResult = RetVal; 02558 02559 if (InstResult) { 02560 DEBUG(dbgs() << "Successfully evaluated function. Result: " << 02561 InstResult << "\n\n"); 02562 } else { 02563 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n"); 02564 } 02565 } 02566 } else if (isa<TerminatorInst>(CurInst)) { 02567 DEBUG(dbgs() << "Found a terminator instruction.\n"); 02568 02569 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { 02570 if (BI->isUnconditional()) { 02571 NextBB = BI->getSuccessor(0); 02572 } else { 02573 ConstantInt *Cond = 02574 dyn_cast<ConstantInt>(getVal(BI->getCondition())); 02575 if (!Cond) return false; // Cannot determine. 02576 02577 NextBB = BI->getSuccessor(!Cond->getZExtValue()); 02578 } 02579 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { 02580 ConstantInt *Val = 02581 dyn_cast<ConstantInt>(getVal(SI->getCondition())); 02582 if (!Val) return false; // Cannot determine. 02583 NextBB = SI->findCaseValue(Val).getCaseSuccessor(); 02584 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) { 02585 Value *Val = getVal(IBI->getAddress())->stripPointerCasts(); 02586 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val)) 02587 NextBB = BA->getBasicBlock(); 02588 else 02589 return false; // Cannot determine. 02590 } else if (isa<ReturnInst>(CurInst)) { 02591 NextBB = nullptr; 02592 } else { 02593 // invoke, unwind, resume, unreachable. 02594 DEBUG(dbgs() << "Can not handle terminator."); 02595 return false; // Cannot handle this terminator. 02596 } 02597 02598 // We succeeded at evaluating this block! 02599 DEBUG(dbgs() << "Successfully evaluated block.\n"); 02600 return true; 02601 } else { 02602 // Did not know how to evaluate this! 02603 DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction." 02604 "\n"); 02605 return false; 02606 } 02607 02608 if (!CurInst->use_empty()) { 02609 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult)) 02610 InstResult = ConstantFoldConstantExpression(CE, DL, TLI); 02611 02612 setVal(CurInst, InstResult); 02613 } 02614 02615 // If we just processed an invoke, we finished evaluating the block. 02616 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) { 02617 NextBB = II->getNormalDest(); 02618 DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n"); 02619 return true; 02620 } 02621 02622 // Advance program counter. 02623 ++CurInst; 02624 } 02625 } 02626 02627 /// EvaluateFunction - Evaluate a call to function F, returning true if 02628 /// successful, false if we can't evaluate it. ActualArgs contains the formal 02629 /// arguments for the function. 02630 bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal, 02631 const SmallVectorImpl<Constant*> &ActualArgs) { 02632 // Check to see if this function is already executing (recursion). If so, 02633 // bail out. TODO: we might want to accept limited recursion. 02634 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) 02635 return false; 02636 02637 CallStack.push_back(F); 02638 02639 // Initialize arguments to the incoming values specified. 02640 unsigned ArgNo = 0; 02641 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; 02642 ++AI, ++ArgNo) 02643 setVal(AI, ActualArgs[ArgNo]); 02644 02645 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such, 02646 // we can only evaluate any one basic block at most once. This set keeps 02647 // track of what we have executed so we can detect recursive cases etc. 02648 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; 02649 02650 // CurBB - The current basic block we're evaluating. 02651 BasicBlock *CurBB = F->begin(); 02652 02653 BasicBlock::iterator CurInst = CurBB->begin(); 02654 02655 while (1) { 02656 BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings. 02657 DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n"); 02658 02659 if (!EvaluateBlock(CurInst, NextBB)) 02660 return false; 02661 02662 if (!NextBB) { 02663 // Successfully running until there's no next block means that we found 02664 // the return. Fill it the return value and pop the call stack. 02665 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator()); 02666 if (RI->getNumOperands()) 02667 RetVal = getVal(RI->getOperand(0)); 02668 CallStack.pop_back(); 02669 return true; 02670 } 02671 02672 // Okay, we succeeded in evaluating this control flow. See if we have 02673 // executed the new block before. If so, we have a looping function, 02674 // which we cannot evaluate in reasonable time. 02675 if (!ExecutedBlocks.insert(NextBB)) 02676 return false; // looped! 02677 02678 // Okay, we have never been in this block before. Check to see if there 02679 // are any PHI nodes. If so, evaluate them with information about where 02680 // we came from. 02681 PHINode *PN = nullptr; 02682 for (CurInst = NextBB->begin(); 02683 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) 02684 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB))); 02685 02686 // Advance to the next block. 02687 CurBB = NextBB; 02688 } 02689 } 02690 02691 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if 02692 /// we can. Return true if we can, false otherwise. 02693 static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL, 02694 const TargetLibraryInfo *TLI) { 02695 // Call the function. 02696 Evaluator Eval(DL, TLI); 02697 Constant *RetValDummy; 02698 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, 02699 SmallVector<Constant*, 0>()); 02700 02701 if (EvalSuccess) { 02702 ++NumCtorsEvaluated; 02703 02704 // We succeeded at evaluation: commit the result. 02705 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 02706 << F->getName() << "' to " << Eval.getMutatedMemory().size() 02707 << " stores.\n"); 02708 for (DenseMap<Constant*, Constant*>::const_iterator I = 02709 Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end(); 02710 I != E; ++I) 02711 CommitValueTo(I->second, I->first); 02712 for (GlobalVariable *GV : Eval.getInvariants()) 02713 GV->setConstant(true); 02714 } 02715 02716 return EvalSuccess; 02717 } 02718 02719 static int compareNames(Constant *const *A, Constant *const *B) { 02720 return (*A)->getName().compare((*B)->getName()); 02721 } 02722 02723 static void setUsedInitializer(GlobalVariable &V, 02724 const SmallPtrSet<GlobalValue *, 8> &Init) { 02725 if (Init.empty()) { 02726 V.eraseFromParent(); 02727 return; 02728 } 02729 02730 // Type of pointer to the array of pointers. 02731 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); 02732 02733 SmallVector<llvm::Constant *, 8> UsedArray; 02734 for (GlobalValue *GV : Init) { 02735 Constant *Cast 02736 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy); 02737 UsedArray.push_back(Cast); 02738 } 02739 // Sort to get deterministic order. 02740 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); 02741 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); 02742 02743 Module *M = V.getParent(); 02744 V.removeFromParent(); 02745 GlobalVariable *NV = 02746 new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage, 02747 llvm::ConstantArray::get(ATy, UsedArray), ""); 02748 NV->takeName(&V); 02749 NV->setSection("llvm.metadata"); 02750 delete &V; 02751 } 02752 02753 namespace { 02754 /// \brief An easy to access representation of llvm.used and llvm.compiler.used. 02755 class LLVMUsed { 02756 SmallPtrSet<GlobalValue *, 8> Used; 02757 SmallPtrSet<GlobalValue *, 8> CompilerUsed; 02758 GlobalVariable *UsedV; 02759 GlobalVariable *CompilerUsedV; 02760 02761 public: 02762 LLVMUsed(Module &M) { 02763 UsedV = collectUsedGlobalVariables(M, Used, false); 02764 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); 02765 } 02766 typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator; 02767 typedef iterator_range<iterator> used_iterator_range; 02768 iterator usedBegin() { return Used.begin(); } 02769 iterator usedEnd() { return Used.end(); } 02770 used_iterator_range used() { 02771 return used_iterator_range(usedBegin(), usedEnd()); 02772 } 02773 iterator compilerUsedBegin() { return CompilerUsed.begin(); } 02774 iterator compilerUsedEnd() { return CompilerUsed.end(); } 02775 used_iterator_range compilerUsed() { 02776 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd()); 02777 } 02778 bool usedCount(GlobalValue *GV) const { return Used.count(GV); } 02779 bool compilerUsedCount(GlobalValue *GV) const { 02780 return CompilerUsed.count(GV); 02781 } 02782 bool usedErase(GlobalValue *GV) { return Used.erase(GV); } 02783 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } 02784 bool usedInsert(GlobalValue *GV) { return Used.insert(GV); } 02785 bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV); } 02786 02787 void syncVariablesAndSets() { 02788 if (UsedV) 02789 setUsedInitializer(*UsedV, Used); 02790 if (CompilerUsedV) 02791 setUsedInitializer(*CompilerUsedV, CompilerUsed); 02792 } 02793 }; 02794 } 02795 02796 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { 02797 if (GA.use_empty()) // No use at all. 02798 return false; 02799 02800 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && 02801 "We should have removed the duplicated " 02802 "element from llvm.compiler.used"); 02803 if (!GA.hasOneUse()) 02804 // Strictly more than one use. So at least one is not in llvm.used and 02805 // llvm.compiler.used. 02806 return true; 02807 02808 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. 02809 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); 02810 } 02811 02812 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, 02813 const LLVMUsed &U) { 02814 unsigned N = 2; 02815 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && 02816 "We should have removed the duplicated " 02817 "element from llvm.compiler.used"); 02818 if (U.usedCount(&V) || U.compilerUsedCount(&V)) 02819 ++N; 02820 return V.hasNUsesOrMore(N); 02821 } 02822 02823 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { 02824 if (!GA.hasLocalLinkage()) 02825 return true; 02826 02827 return U.usedCount(&GA) || U.compilerUsedCount(&GA); 02828 } 02829 02830 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U, 02831 bool &RenameTarget) { 02832 RenameTarget = false; 02833 bool Ret = false; 02834 if (hasUseOtherThanLLVMUsed(GA, U)) 02835 Ret = true; 02836 02837 // If the alias is externally visible, we may still be able to simplify it. 02838 if (!mayHaveOtherReferences(GA, U)) 02839 return Ret; 02840 02841 // If the aliasee has internal linkage, give it the name and linkage 02842 // of the alias, and delete the alias. This turns: 02843 // define internal ... @f(...) 02844 // @a = alias ... @f 02845 // into: 02846 // define ... @a(...) 02847 Constant *Aliasee = GA.getAliasee(); 02848 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 02849 if (!Target->hasLocalLinkage()) 02850 return Ret; 02851 02852 // Do not perform the transform if multiple aliases potentially target the 02853 // aliasee. This check also ensures that it is safe to replace the section 02854 // and other attributes of the aliasee with those of the alias. 02855 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) 02856 return Ret; 02857 02858 RenameTarget = true; 02859 return true; 02860 } 02861 02862 bool GlobalOpt::OptimizeGlobalAliases(Module &M) { 02863 bool Changed = false; 02864 LLVMUsed Used(M); 02865 02866 for (GlobalValue *GV : Used.used()) 02867 Used.compilerUsedErase(GV); 02868 02869 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 02870 I != E;) { 02871 Module::alias_iterator J = I++; 02872 // Aliases without names cannot be referenced outside this module. 02873 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage()) 02874 J->setLinkage(GlobalValue::InternalLinkage); 02875 // If the aliasee may change at link time, nothing can be done - bail out. 02876 if (J->mayBeOverridden()) 02877 continue; 02878 02879 Constant *Aliasee = J->getAliasee(); 02880 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts()); 02881 // We can't trivially replace the alias with the aliasee if the aliasee is 02882 // non-trivial in some way. 02883 // TODO: Try to handle non-zero GEPs of local aliasees. 02884 if (!Target) 02885 continue; 02886 Target->removeDeadConstantUsers(); 02887 02888 // Make all users of the alias use the aliasee instead. 02889 bool RenameTarget; 02890 if (!hasUsesToReplace(*J, Used, RenameTarget)) 02891 continue; 02892 02893 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType())); 02894 ++NumAliasesResolved; 02895 Changed = true; 02896 02897 if (RenameTarget) { 02898 // Give the aliasee the name, linkage and other attributes of the alias. 02899 Target->takeName(J); 02900 Target->setLinkage(J->getLinkage()); 02901 Target->setVisibility(J->getVisibility()); 02902 Target->setDLLStorageClass(J->getDLLStorageClass()); 02903 02904 if (Used.usedErase(J)) 02905 Used.usedInsert(Target); 02906 02907 if (Used.compilerUsedErase(J)) 02908 Used.compilerUsedInsert(Target); 02909 } else if (mayHaveOtherReferences(*J, Used)) 02910 continue; 02911 02912 // Delete the alias. 02913 M.getAliasList().erase(J); 02914 ++NumAliasesRemoved; 02915 Changed = true; 02916 } 02917 02918 Used.syncVariablesAndSets(); 02919 02920 return Changed; 02921 } 02922 02923 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) { 02924 if (!TLI->has(LibFunc::cxa_atexit)) 02925 return nullptr; 02926 02927 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit)); 02928 02929 if (!Fn) 02930 return nullptr; 02931 02932 FunctionType *FTy = Fn->getFunctionType(); 02933 02934 // Checking that the function has the right return type, the right number of 02935 // parameters and that they all have pointer types should be enough. 02936 if (!FTy->getReturnType()->isIntegerTy() || 02937 FTy->getNumParams() != 3 || 02938 !FTy->getParamType(0)->isPointerTy() || 02939 !FTy->getParamType(1)->isPointerTy() || 02940 !FTy->getParamType(2)->isPointerTy()) 02941 return nullptr; 02942 02943 return Fn; 02944 } 02945 02946 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++ 02947 /// destructor and can therefore be eliminated. 02948 /// Note that we assume that other optimization passes have already simplified 02949 /// the code so we only look for a function with a single basic block, where 02950 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and 02951 /// other side-effect free instructions. 02952 static bool cxxDtorIsEmpty(const Function &Fn, 02953 SmallPtrSet<const Function *, 8> &CalledFunctions) { 02954 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 02955 // nounwind, but that doesn't seem worth doing. 02956 if (Fn.isDeclaration()) 02957 return false; 02958 02959 if (++Fn.begin() != Fn.end()) 02960 return false; 02961 02962 const BasicBlock &EntryBlock = Fn.getEntryBlock(); 02963 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end(); 02964 I != E; ++I) { 02965 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 02966 // Ignore debug intrinsics. 02967 if (isa<DbgInfoIntrinsic>(CI)) 02968 continue; 02969 02970 const Function *CalledFn = CI->getCalledFunction(); 02971 02972 if (!CalledFn) 02973 return false; 02974 02975 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions); 02976 02977 // Don't treat recursive functions as empty. 02978 if (!NewCalledFunctions.insert(CalledFn)) 02979 return false; 02980 02981 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions)) 02982 return false; 02983 } else if (isa<ReturnInst>(*I)) 02984 return true; // We're done. 02985 else if (I->mayHaveSideEffects()) 02986 return false; // Destructor with side effects, bail. 02987 } 02988 02989 return false; 02990 } 02991 02992 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { 02993 /// Itanium C++ ABI p3.3.5: 02994 /// 02995 /// After constructing a global (or local static) object, that will require 02996 /// destruction on exit, a termination function is registered as follows: 02997 /// 02998 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); 02999 /// 03000 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the 03001 /// call f(p) when DSO d is unloaded, before all such termination calls 03002 /// registered before this one. It returns zero if registration is 03003 /// successful, nonzero on failure. 03004 03005 // This pass will look for calls to __cxa_atexit where the function is trivial 03006 // and remove them. 03007 bool Changed = false; 03008 03009 for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end(); 03010 I != E;) { 03011 // We're only interested in calls. Theoretically, we could handle invoke 03012 // instructions as well, but neither llvm-gcc nor clang generate invokes 03013 // to __cxa_atexit. 03014 CallInst *CI = dyn_cast<CallInst>(*I++); 03015 if (!CI) 03016 continue; 03017 03018 Function *DtorFn = 03019 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); 03020 if (!DtorFn) 03021 continue; 03022 03023 SmallPtrSet<const Function *, 8> CalledFunctions; 03024 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions)) 03025 continue; 03026 03027 // Just remove the call. 03028 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); 03029 CI->eraseFromParent(); 03030 03031 ++NumCXXDtorsRemoved; 03032 03033 Changed |= true; 03034 } 03035 03036 return Changed; 03037 } 03038 03039 bool GlobalOpt::runOnModule(Module &M) { 03040 bool Changed = false; 03041 03042 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); 03043 DL = DLP ? &DLP->getDataLayout() : nullptr; 03044 TLI = &getAnalysis<TargetLibraryInfo>(); 03045 03046 bool LocalChange = true; 03047 while (LocalChange) { 03048 LocalChange = false; 03049 03050 // Delete functions that are trivially dead, ccc -> fastcc 03051 LocalChange |= OptimizeFunctions(M); 03052 03053 // Optimize global_ctors list. 03054 LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) { 03055 return EvaluateStaticConstructor(F, DL, TLI); 03056 }); 03057 03058 // Optimize non-address-taken globals. 03059 LocalChange |= OptimizeGlobalVars(M); 03060 03061 // Resolve aliases, when possible. 03062 LocalChange |= OptimizeGlobalAliases(M); 03063 03064 // Try to remove trivial global destructors if they are not removed 03065 // already. 03066 Function *CXAAtExitFn = FindCXAAtExit(M, TLI); 03067 if (CXAAtExitFn) 03068 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); 03069 03070 Changed |= LocalChange; 03071 } 03072 03073 // TODO: Move all global ctors functions to the end of the module for code 03074 // layout. 03075 03076 return Changed; 03077 }