diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 8aeeb4e4ec82c..99a2a55c58d4e 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -578,7 +578,6 @@ class WorkloadImportsManager : public ModuleImportsManager {
     GlobalsImporter GVI(Index, DefinedGVSummaries, IsPrevailing, ImportList,
                         ExportLists);
     auto &ValueInfos = SetIter->second;
-    SmallVector GlobWorklist;
     for (auto &VI : llvm::make_early_inc_range(ValueInfos)) {
       auto It = DefinedGVSummaries.find(VI.getGUID());
       if (It != DefinedGVSummaries.end() &&
diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp
index 6c9d499b0e7ea..92d10d1c85ca5 100644
--- a/llvm/lib/Transforms/IPO/IROutliner.cpp
+++ b/llvm/lib/Transforms/IPO/IROutliner.cpp
@@ -2702,7 +2702,7 @@ void IROutliner::updateOutputMapping(OutlinableRegion &Region,
 }
 
 bool IROutliner::extractSection(OutlinableRegion &Region) {
-  SetVector<Value *> ArgInputs, Outputs, SinkCands;
+  SetVector<Value *> ArgInputs, Outputs;
   assert(Region.StartBB && "StartBB for the OutlinableRegion is nullptr!");
   BasicBlock *InitialStart = Region.StartBB;
   Function *OrigF = Region.StartBB->getParent();
diff --git a/llvm/lib/Transforms/IPO/Internalize.cpp b/llvm/lib/Transforms/IPO/Internalize.cpp
index 4b690bf2b2fc1..d34444a2697b2 100644
--- a/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -132,7 +132,6 @@ bool InternalizePass::shouldPreserveGV(const GlobalValue &GV) {
 
 bool InternalizePass::maybeInternalize(
     GlobalValue &GV, DenseMap<const Comdat *, ComdatInfo> &ComdatMap) {
-  SmallString<0> ComdatName;
   if (Comdat *C = GV.getComdat()) {
     // For GlobalAlias, C is the aliasee object's comdat which may have been
     // redirected. So ComdatMap may not contain C.
diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
index 44df2239c475a..6bbf99cd8d510 100644
--- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
@@ -2040,8 +2040,6 @@ bool NumericalStabilitySanitizer::sanitizeFunction(
   // the module constructor.
   if (F.getName() == kNsanModuleCtorName)
     return false;
-  SmallVector AllLoadsAndStores;
-  SmallVector LocalLoadsAndStores;
 
   // The instrumentation maintains:
   // - for each IR value `v` of floating-point (or vector floating-point) type
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 9d7f5e64f9868..2ef87f531dfae 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -2301,11 +2301,8 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
 /// no instructions dependent on Arg that need a positive ref count in between
 /// the autorelease and the ret.
-static CallInst *
-FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
-                                       ReturnInst *Ret,
-                                       ProvenanceAnalysis &PA) {
-  SmallPtrSet DepInsts;
+static CallInst *FindPredecessorAutoreleaseWithSafePath(
+    const Value *Arg, BasicBlock *BB, ReturnInst *Ret, ProvenanceAnalysis &PA) {
   auto *Autorelease = dyn_cast_or_null<CallInst>(
       findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));
 
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 8c8effe2b013e..fb7bc7c4be6c8 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -1463,7 +1463,6 @@ bool LoopInterchangeTransform::transform() {
   BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
   BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
   if (InnerLoopPreHeader != OuterLoopHeader) {
-    SmallPtrSet NeedsMoving;
     for (Instruction &I :
          make_early_inc_range(make_range(InnerLoopPreHeader->begin(),
                                          std::prev(InnerLoopPreHeader->end())))
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index d5fbbf0c10011..3eb118908959f 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -2737,7 +2737,6 @@ NewGVN::makePossiblePHIOfOps(Instruction *I,
   if (!isCycleFree(I))
     return nullptr;
 
-  SmallPtrSet ProcessedPHIs;
   // TODO: We don't do phi translation on memory accesses because it's
   // complicated. For a load, we'd need to be able to simulate a new memoryuse,
   // which we don't have a good way of doing ATM.
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index 4c6f6f12d7138..c6962e51b2238 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -2762,7 +2762,6 @@ static BranchInst *turnSelectIntoBranch(SelectInst *SI, DominatorTree &DT,
 static BranchInst *turnGuardIntoBranch(IntrinsicInst *GI, Loop &L,
                                        DominatorTree &DT, LoopInfo &LI,
                                        MemorySSAUpdater *MSSAU) {
-  SmallVector DTUpdates;
   LLVM_DEBUG(dbgs() << "Turning " << *GI << " into a branch.\n");
   BasicBlock *CheckBB = GI->getParent();
 
diff --git a/llvm/lib/Transforms/Utils/EscapeEnumerator.cpp b/llvm/lib/Transforms/Utils/EscapeEnumerator.cpp
index cc00106fcbfe0..bafaf6feaaee6 100644
--- a/llvm/lib/Transforms/Utils/EscapeEnumerator.cpp
+++ b/llvm/lib/Transforms/Utils/EscapeEnumerator.cpp
@@ -87,7 +87,6 @@ IRBuilder<> *EscapeEnumerator::Next() {
 
   // Transform the 'call' instructions into 'invoke's branching to the
   // cleanup block. Go in reverse order to make prettier BB names.
-  SmallVector Args;
   for (unsigned I = Calls.size(); I != 0;) {
     CallInst *CI = cast<CallInst>(Calls[--I]);
     changeToInvokeAndSplitBasicBlock(CI, CleanupBB, DTU);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index c0e9b8dc2201f..4d183bb42f0e1 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -7186,7 +7186,6 @@ void BoUpSLP::reorderTopToBottom() {
     MapVector<OrdersType, unsigned,
              DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
        OrdersUses;
-    SmallPtrSet VisitedOps;
     for (const TreeEntry *OpTE : OrderedEntries) {
       // No need to reorder this nodes, still need to extend and to use shuffle,
       // just need to merge reordering shuffle and the reuse shuffle.
@@ -8153,7 +8152,6 @@ static void gatherPossiblyVectorizableLoads(
           int &Offset, unsigned &Start) {
         if (Loads.empty())
           return GatheredLoads.end();
-        SmallVector<std::pair<LoadInst *, int>> Res;
         LoadInst *LI = Loads.front().first;
         for (auto [Idx, Data] : enumerate(GatheredLoads)) {
           if (Idx < Start)
@@ -13801,7 +13799,6 @@ bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
   // with the second gather nodes if they have less scalar operands rather than
   // the initial tree element (may be profitable to shuffle the second gather)
   // or they are extractelements, which form shuffle.
-  SmallVector<int> Mask;
   if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
       AreVectorizableGathers(VectorizableTree[1].get(),
                              VectorizableTree[0]->Scalars.size()))
@@ -16875,8 +16872,6 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
 
   unsigned VF = E->getVectorFactor();
   bool NeedFreeze = false;
-  SmallVector<int> ReuseShuffleIndices(E->ReuseShuffleIndices.begin(),
-                                       E->ReuseShuffleIndices.end());
   SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
   // Clear values, to be replaced by insertvector instructions.
   for (auto [EIdx, Idx] : E->CombinedEntriesWithIndices)
@@ -17619,7 +17614,6 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
 
       SmallPtrSet<BasicBlock *, 4> VisitedBBs;
       for (unsigned I : seq<unsigned>(PH->getNumIncomingValues())) {
-        ValueList Operands;
         BasicBlock *IBB = PH->getIncomingBlock(I);
 
         // Stop emission if all incoming values are generated.
@@ -18291,7 +18285,6 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       TysForDecl.push_back(VecTy);
       auto *CEI = cast<CallInst>(VL0);
       for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
-        ValueList OpVL;
         // Some intrinsics have scalar arguments. This argument should not be
         // vectorized.
         if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I, TTI)) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index a4aab8a2dee79..749ef4fe83bc8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -554,7 +554,6 @@ VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
   assert((SplitAt == end() || SplitAt->getParent() == this) &&
          "can only split at a position in the same block");
 
-  SmallVector Succs(successors());
   // Create new empty block after the block to split.
   auto *SplitBlock = getPlan()->createVPBasicBlock(getName() + ".split");
   VPBlockUtils::insertBlockAfter(SplitBlock, this);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 500622f57f89f..2108fbadb51d8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1892,8 +1892,6 @@ static Value *getStepVector(Value *Val, Value *Step,
          "Induction Step must be an integer or FP");
   assert(Step->getType() == STy && "Step has wrong type");
 
-  SmallVector<Constant *, 8> Indices;
-
   // Create a vector of consecutive numbers from zero to VF.
   VectorType *InitVecValVTy = ValVTy;
   if (STy->isFloatingPointTy()) {