diff --git a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h index dfda2dcee0db1..09a8875e1e28c 100644 --- a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h +++ b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h @@ -545,10 +545,6 @@ struct IRInstructionMapper { // dependent. InstrType visitLandingPadInst(LandingPadInst &LPI) { return Illegal; } InstrType visitFuncletPadInst(FuncletPadInst &FPI) { return Illegal; } - // DebugInfo should be included in the regions, but should not be - // analyzed for similarity as it has no bearing on the outcome of the - // program. - InstrType visitDbgInfoIntrinsic(DbgInfoIntrinsic &DII) { return Invisible; } InstrType visitIntrinsicInst(IntrinsicInst &II) { // These are disabled due to complications in the CodeExtractor when // outlining these instructions. For instance, It is unclear what we diff --git a/llvm/include/llvm/Analysis/PtrUseVisitor.h b/llvm/include/llvm/Analysis/PtrUseVisitor.h index c9d3874e7dd96..0858d8aee2186 100644 --- a/llvm/include/llvm/Analysis/PtrUseVisitor.h +++ b/llvm/include/llvm/Analysis/PtrUseVisitor.h @@ -285,7 +285,6 @@ class PtrUseVisitor : protected InstVisitor, // No-op intrinsics which we know don't escape the pointer to logic in // some other function. - void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {} void visitMemIntrinsic(MemIntrinsic &I) {} void visitIntrinsicInst(IntrinsicInst &II) { switch (II.getIntrinsicID()) { diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h index b4eb729c7ce38..6d5398bb7a4cd 100644 --- a/llvm/include/llvm/IR/InstVisitor.h +++ b/llvm/include/llvm/IR/InstVisitor.h @@ -199,13 +199,6 @@ class InstVisitor { RetTy visitCatchPadInst(CatchPadInst &I) { DELEGATE(FuncletPadInst); } RetTy visitFreezeInst(FreezeInst &I) { DELEGATE(Instruction); } - // Handle the special intrinsic instruction classes. - RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgVariableIntrinsic);} - RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgVariableIntrinsic);} - RetTy visitDbgVariableIntrinsic(DbgVariableIntrinsic &I) - { DELEGATE(DbgInfoIntrinsic);} - RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);} - RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); } RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); } RetTy visitMemSetPatternInst(MemSetPatternInst &I) { DELEGATE(IntrinsicInst); @@ -286,9 +279,6 @@ class InstVisitor { if (const Function *F = I.getCalledFunction()) { switch (F->getIntrinsicID()) { default: DELEGATE(IntrinsicInst); - case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst); - case Intrinsic::dbg_value: DELEGATE(DbgValueInst); - case Intrinsic::dbg_label: DELEGATE(DbgLabelInst); case Intrinsic::memcpy: case Intrinsic::memcpy_inline: DELEGATE(MemCpyInst); diff --git a/llvm/include/llvm/Transforms/Utils/Local.h b/llvm/include/llvm/Transforms/Utils/Local.h index 55e153f289590..df146458b4e6f 100644 --- a/llvm/include/llvm/Transforms/Utils/Local.h +++ b/llvm/include/llvm/Transforms/Utils/Local.h @@ -394,12 +394,9 @@ handleUnreachableTerminator(Instruction *I, SmallVectorImpl &PoisonedValues); /// Remove all instructions from a basic block other than its terminator -/// and any present EH pad instructions. Returns a pair where the first element -/// is the number of instructions (excluding debug info intrinsics) that have -/// been removed, and the second element is the number of debug info intrinsics +/// and any present EH pad instructions. 
Returns the number of instructions /// that have been removed. -LLVM_ABI std::pair -removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB); +LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB); /// Insert an unreachable instruction before the specified /// instruction, making it and the rest of the code in the block dead. diff --git a/llvm/lib/Analysis/AliasSetTracker.cpp b/llvm/lib/Analysis/AliasSetTracker.cpp index 6d1dafbae60b9..1e2f05b60a9a3 100644 --- a/llvm/lib/Analysis/AliasSetTracker.cpp +++ b/llvm/lib/Analysis/AliasSetTracker.cpp @@ -343,9 +343,6 @@ void AliasSetTracker::add(AnyMemTransferInst *MTI) { } void AliasSetTracker::addUnknown(Instruction *Inst) { - if (isa(Inst)) - return; // Ignore DbgInfo Intrinsics. - if (auto *II = dyn_cast(Inst)) { // These intrinsics will show up as affecting memory, but they are just // markers. diff --git a/llvm/lib/Analysis/CallGraph.cpp b/llvm/lib/Analysis/CallGraph.cpp index 5d1af52e8ab58..d7695e5cfc0d3 100644 --- a/llvm/lib/Analysis/CallGraph.cpp +++ b/llvm/lib/Analysis/CallGraph.cpp @@ -34,8 +34,7 @@ CallGraph::CallGraph(Module &M) CallsExternalNode(std::make_unique(this, nullptr)) { // Add every interesting function to the call graph. for (Function &F : M) - if (!isDbgInfoIntrinsic(F.getIntrinsicID())) - addToCallGraph(&F); + addToCallGraph(&F); } CallGraph::CallGraph(CallGraph &&Arg) @@ -101,7 +100,7 @@ void CallGraph::populateCallGraphNode(CallGraphNode *Node) { const Function *Callee = Call->getCalledFunction(); if (!Callee) Node->addCalledFunction(Call, CallsExternalNode.get()); - else if (!isDbgInfoIntrinsic(Callee->getIntrinsicID())) + else Node->addCalledFunction(Call, getOrInsertFunction(Callee)); // Add reference to callback functions. diff --git a/llvm/lib/Analysis/DemandedBits.cpp b/llvm/lib/Analysis/DemandedBits.cpp index d7e2a3fa4fc59..6694d5cc06c8c 100644 --- a/llvm/lib/Analysis/DemandedBits.cpp +++ b/llvm/lib/Analysis/DemandedBits.cpp @@ -46,8 +46,7 @@ using namespace llvm::PatternMatch; #define DEBUG_TYPE "demanded-bits" static bool isAlwaysLive(Instruction *I) { - return I->isTerminator() || isa(I) || I->isEHPad() || - I->mayHaveSideEffects(); + return I->isTerminator() || I->isEHPad() || I->mayHaveSideEffects(); } void DemandedBits::determineLiveOperandBits( diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp index 425f3682122cd..71a75b496455a 100644 --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -434,7 +434,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &S // If we see a free or a call which may write to memory (i.e. which might do // a free) the pointer could be marked invalid. if (isa(BBI) && BBI->mayWriteToMemory() && - !isa(BBI) && !isa(BBI)) + !isa(BBI)) return false; Value *AccessedPtr; diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp index f062189bac6a0..d6f490cb69a52 100644 --- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -188,9 +188,6 @@ MemDepResult MemoryDependenceResults::getCallDependencyFrom( // Walk backwards through the block, looking for dependencies. while (ScanIt != BB->begin()) { Instruction *Inst = &*--ScanIt; - // Debug intrinsics don't cause dependences and should not affect Limit - if (isa(Inst)) - continue; // Limit the amount of scanning we do so we don't end up with quadratic // running time on extreme testcases. 
@@ -432,11 +429,6 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom( while (ScanIt != BB->begin()) { Instruction *Inst = &*--ScanIt; - if (IntrinsicInst *II = dyn_cast(Inst)) - // Debug intrinsics don't (and can't) cause dependencies. - if (isa(II)) - continue; - // Limit the amount of scanning we do so we don't end up with quadratic // running time on extreme testcases. --*Limit; diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index d8c1096049dce..3cf3ad6cc95f3 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -7842,8 +7842,6 @@ bool llvm::isGuaranteedToTransferExecutionToSuccessor( iterator_range Range, unsigned ScanLimit) { assert(ScanLimit && "scan limit must be non-zero"); for (const Instruction &I : Range) { - if (isa(I)) - continue; if (--ScanLimit == 0) return false; if (!isGuaranteedToTransferExecutionToSuccessor(&I)) @@ -8046,8 +8044,6 @@ static bool programUndefinedIfUndefOrPoison(const Value *V, // well-defined operands. for (const auto &I : make_range(Begin, End)) { - if (isa(I)) - continue; if (--ScanLimit == 0) break; @@ -8072,8 +8068,6 @@ static bool programUndefinedIfUndefOrPoison(const Value *V, while (true) { for (const auto &I : make_range(Begin, End)) { - if (isa(I)) - continue; if (--ScanLimit == 0) return false; if (mustTriggerUB(&I, YieldsPoison)) diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 3792b456c836e..43574a54c37dd 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -896,12 +896,7 @@ BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { BasicBlock::iterator BBI = BI->getIterator(); if (BBI != BB->begin()) { --BBI; - while (isa(BBI)) { - if (BBI == BB->begin()) - break; - --BBI; - } - if (!isa(BBI) && !isa(BBI)) + if (!isa(BBI)) return nullptr; } @@ -2981,10 +2976,9 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, // Make sure there are no instructions between the first instruction // and return. BasicBlock::const_iterator BI = BB->getFirstNonPHIIt(); - // Skip over debug and the bitcast. - while (isa(BI) || &*BI == BCI || &*BI == EVI || - isa(BI) || isLifetimeEndOrBitCastFor(&*BI) || - isFakeUse(&*BI)) + // Skip over pseudo-probes and the bitcast. + while (&*BI == BCI || &*BI == EVI || isa(BI) || + isLifetimeEndOrBitCastFor(&*BI) || isFakeUse(&*BI)) BI = std::next(BI); if (&*BI != RetI) return false; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index e6a1dc930685c..36be01e837b7b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -1320,10 +1320,7 @@ void SelectionDAGBuilder::visit(const Instruction &I) { HandlePHINodesInSuccessorBlocks(I.getParent()); } - // Increase the SDNodeOrder if dealing with a non-debug instruction. - if (!isa(I)) - ++SDNodeOrder; - + ++SDNodeOrder; CurInst = &I; // Set inserted listener only if required. 
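Illustrative sketch (not part of the patch): the scan loops above no longer need an isa<DbgInfoIntrinsic> skip because, in the DbgRecord representation, variable-location debug info is attached to instructions rather than interleaved as intrinsic calls. The helper below assumes that representation; the name scanBlock and the limit handling are invented for the example, while filterDbgVars, getDbgRecordRange and DbgVariableRecord are the APIs already used elsewhere in this diff.

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugProgramInstruction.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Walk a block backwards under a scan limit, in the style of the
// memory-dependence and value-tracking changes above. Debug info never
// appears as an Instruction here, so every iteration can count against
// Limit without perturbing the result.
static bool scanBlock(BasicBlock &BB, unsigned Limit) {
  for (Instruction &I : reverse(BB)) {
    if (--Limit == 0)
      return false;
    // If variable-location info is needed, it hangs off the instruction as
    // DbgVariableRecords instead of preceding it as dbg.value calls.
    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
      (void)DVR.getVariable(); // e.g. inspect the attached DILocalVariable
  }
  return true;
}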
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index f599637564715..7960eb097fea4 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -1507,7 +1507,6 @@ static bool isFoldedOrDeadInstruction(const Instruction *I, const FunctionLoweringInfo &FuncInfo) { return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded. !I->isTerminator() && // Terminators aren't folded. - !isa(I) && // Debug instructions aren't folded. !I->isEHPad() && // EH pad instructions aren't folded. !FuncInfo.isExportedInst(I); // Exported instrs must be computed. } diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp index 2a84e7bae0f10..4fb9b5015aa86 100644 --- a/llvm/lib/IR/DebugInfo.cpp +++ b/llvm/lib/IR/DebugInfo.cpp @@ -586,11 +586,6 @@ bool llvm::stripDebugInfo(Function &F) { DenseMap LoopIDsMap; for (BasicBlock &BB : F) { for (Instruction &I : llvm::make_early_inc_range(BB)) { - if (isa(&I)) { - I.eraseFromParent(); - Changed = true; - continue; - } if (I.getDebugLoc()) { Changed = true; I.setDebugLoc(DebugLoc()); diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp index 0c0b512e3b6ce..75c7dd944b467 100644 --- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp +++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp @@ -369,8 +369,7 @@ Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst, unsigned Count = 0; for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) { - if (!isa(*BI)) - ++Count; + ++Count; if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc))) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp index 9604f252dd3df..c2eb24b482d44 100644 --- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp +++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp @@ -2318,7 +2318,7 @@ bool HexagonLoopIdiomRecognize::coverLoop(Loop *L, // instructions in it that are not involved in the original set Insts. for (auto *B : L->blocks()) { for (auto &In : *B) { - if (isa(In) || isa(In)) + if (isa(In)) continue; if (!Worklist.count(&In) && In.mayHaveSideEffects()) return false; diff --git a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp index 47bb20f4aa073..d0a5be8b2e23a 100644 --- a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp +++ b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp @@ -117,7 +117,7 @@ class PPCBoolRetToInt : public FunctionPass { // A PHINode is Promotable if: // 1. Its type is i1 AND - // 2. All of its uses are ReturnInt, CallInst, PHINode, or DbgInfoIntrinsic + // 2. All of its uses are ReturnInt, CallInst, or PHINode // AND // 3. 
All of its operands are Constant or Argument or // CallInst or PHINode AND @@ -136,8 +136,7 @@ class PPCBoolRetToInt : public FunctionPass { for (const PHINode *P : Promotable) { // Condition 2 and 3 auto IsValidUser = [] (const Value *V) -> bool { - return isa(V) || isa(V) || isa(V) || - isa(V); + return isa(V) || isa(V) || isa(V); }; auto IsValidOperand = [] (const Value *V) -> bool { return isa(V) || isa(V) || isa(V) || diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp index f62361d334704..8c156c93ba8d1 100644 --- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp +++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp @@ -719,9 +719,7 @@ static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL, if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc))) return false; - // Ignore debug info so that's not counted against MaxInstrsToScan. - // Otherwise debug info could affect codegen. - if (!isa(Inst) && ++NumScanned > MaxInstrsToScan) + if (++NumScanned > MaxInstrsToScan) return false; } diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp index cb18b55ae2183..2c17863266a97 100644 --- a/llvm/lib/Transforms/IPO/IROutliner.cpp +++ b/llvm/lib/Transforms/IPO/IROutliner.cpp @@ -717,8 +717,6 @@ static void moveFunctionData(Function &Old, Function &New, if (ReturnInst *RI = dyn_cast(I)) NewEnds.insert(std::make_pair(RI->getReturnValue(), &CurrBB)); - std::vector DebugInsts; - for (Instruction &Val : CurrBB) { // Since debug-info originates from many different locations in the // program, it will cause incorrect reporting from a debugger if we keep @@ -749,21 +747,12 @@ static void moveFunctionData(Function &Old, Function &New, // From this point we are only handling call instructions. CallInst *CI = cast(&Val); - // Collect debug intrinsics for later removal. - if (isa(CI)) { - DebugInsts.push_back(&Val); - continue; - } - // Edit the scope of called functions inside of outlined functions. if (DISubprogram *SP = New.getSubprogram()) { DILocation *DI = DILocation::get(New.getContext(), 0, 0, SP); Val.setDebugLoc(DI); } } - - for (Instruction *I : DebugInsts) - I->eraseFromParent(); } } diff --git a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp index dda3d5a788157..7fd7d4d4f750b 100644 --- a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp @@ -385,8 +385,7 @@ void SampleProfileProber::instrumentOneFunc(Function &F, TargetMachine *TM) { // line number. Real instructions generated by optimizations may not come // with a line number either. auto HasValidDbgLine = [](Instruction *J) { - return !isa(J) && !isa(J) && - !J->isLifetimeStartOrEnd() && J->getDebugLoc(); + return !isa(J) && !J->isLifetimeStartOrEnd() && J->getDebugLoc(); }; Instruction *J = &*BB->getFirstInsertionPt(); diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index e261807bbc035..9b7325518dac3 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -4758,11 +4758,7 @@ bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) { MoveBefore = *MoveBeforeOpt; } - // Don't move to the position of a debug intrinsic. 
- if (isa(MoveBefore)) - MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator(); - // Re-point iterator to come after any debug-info records, if we're - // running in "RemoveDIs" mode + // Re-point iterator to come after any debug-info records. MoveBefore.setHeadBit(false); bool Changed = false; @@ -5554,11 +5550,9 @@ bool InstCombinerImpl::prepareWorklist(Function &F) { continue; unsigned NumDeadInstInBB; - unsigned NumDeadDbgInstInBB; - std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) = - removeAllNonTerminatorAndEHPadInstructions(&BB); + NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB); - MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0; + MadeIRChange |= NumDeadInstInBB != 0; NumDeadInst += NumDeadInstInBB; } diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp index 9351a42581ba0..3dfb36f4f1815 100644 --- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -583,10 +583,6 @@ static bool functionHasLines(const Function &F, unsigned &EndLine) { EndLine = 0; for (const auto &BB : F) { for (const auto &I : BB) { - // Debug intrinsic locations correspond to the location of the - // declaration, not necessarily any statements or expressions. - if (isa(&I)) continue; - const DebugLoc &Loc = I.getDebugLoc(); if (!Loc) continue; @@ -874,10 +870,6 @@ bool GCOVProfiler::emitProfileNotes( } for (const auto &I : BB) { - // Debug intrinsic locations correspond to the location of the - // declaration, not necessarily any statements or expressions. - if (isa(&I)) continue; - const DebugLoc &Loc = I.getDebugLoc(); if (!Loc) continue; diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index ec9f78edfeb1c..8ae6f7745a9e7 100644 --- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -527,8 +527,7 @@ bool ThreadSanitizer::sanitizeFunction(Function &F, AtomicAccesses.push_back(&Inst); else if (isa(Inst) || isa(Inst)) LocalLoadsAndStores.push_back(&Inst); - else if ((isa(Inst) && !isa(Inst)) || - isa(Inst)) { + else if (isa(Inst) || isa(Inst)) { if (CallInst *CI = dyn_cast(&Inst)) maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI); if (isa(Inst)) diff --git a/llvm/lib/Transforms/Scalar/ADCE.cpp b/llvm/lib/Transforms/Scalar/ADCE.cpp index ea907af96edd9..985b9c0e53125 100644 --- a/llvm/lib/Transforms/Scalar/ADCE.cpp +++ b/llvm/lib/Transforms/Scalar/ADCE.cpp @@ -562,20 +562,7 @@ ADCEChanged AggressiveDeadCodeElimination::removeDeadInstructions() { if (isLive(&I)) continue; - if (auto *DII = dyn_cast(&I)) { - // Avoid removing a dbg.assign that is linked to instructions because it - // holds information about an existing store. - if (auto *DAI = dyn_cast(DII)) - if (!at::getAssignmentInsts(DAI).empty()) - continue; - // Check if the scope of this variable location is alive. - if (AliveScopes.count(DII->getDebugLoc()->getScope())) - continue; - - // Fallthrough and drop the intrinsic. - } else { - Changed.ChangedNonDebugInstr = true; - } + Changed.ChangedNonDebugInstr = true; // Prepare to delete. 
Worklist.push_back(&I); diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index a0eed31fde792..aa9dcfcda7b3e 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -2595,10 +2595,6 @@ bool GVNPass::propagateEquality(Value *LHS, Value *RHS, /// When calculating availability, handle an instruction /// by inserting it into the appropriate sets. bool GVNPass::processInstruction(Instruction *I) { - // Ignore dbg info intrinsics. - if (isa(I)) - return false; - // If the instruction can be easily simplified then do so now in preference // to value numbering it. Value numbering often exposes redundancies, for // example if it determines that %y is equal to %x then the instruction @@ -2884,8 +2880,7 @@ bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, bool GVNPass::performScalarPRE(Instruction *CurInst) { if (isa(CurInst) || CurInst->isTerminator() || isa(CurInst) || CurInst->getType()->isVoidTy() || - CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || - isa(CurInst)) + CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects()) return false; // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp index 1c2e1531e47d8..0acbaf58a8f74 100644 --- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp +++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp @@ -1166,8 +1166,7 @@ std::pair GVNHoist::hoistExpressions(Function &F) { SI.insert(Store, VN); else if (auto *Call = dyn_cast(&I1)) { if (auto *Intr = dyn_cast(Call)) { - if (isa(Intr) || - Intr->getIntrinsicID() == Intrinsic::assume || + if (Intr->getIntrinsicID() == Intrinsic::assume || Intr->getIntrinsicID() == Intrinsic::sideeffect) continue; } diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp index 3024ccb330b1a..857cfc1216b3f 100644 --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -1204,10 +1204,6 @@ bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, return !Invalidated; } else if (CallInst *CI = dyn_cast(&I)) { - // Don't sink or hoist dbg info; it's legal, but not useful. - if (isa(I)) - return false; - // Don't sink calls which can throw. if (CI->mayThrow()) return false; diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 242e571c072af..5c71491033a4f 100644 --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -5613,8 +5613,7 @@ BasicBlock::iterator LSRInstance::AdjustInsertPositionForExpand( } } - assert(!isa(LowestIP) && !LowestIP->isEHPad() - && !isa(LowestIP) && + assert(!isa(LowestIP) && !LowestIP->isEHPad() && "Insertion point must be a normal instruction"); // Then, climb up the immediate dominator tree as far as we can go while @@ -5627,9 +5626,6 @@ BasicBlock::iterator LSRInstance::AdjustInsertPositionForExpand( // Ignore landingpad instructions. while (IP->isEHPad()) ++IP; - // Ignore debug intrinsics. - while (isa(IP)) ++IP; - // Set IP below instructions recently inserted by SCEVExpander. This keeps the // IP consistent across expansions and allows the previously inserted // instructions to be reused by subsequent expansion. 
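Usage note (illustrative, not part of the patch): after the Local.h change earlier in this diff, removeAllNonTerminatorAndEHPadInstructions returns a single count, so callers such as the InstCombine worklist preparation above no longer unpack a pair. A hypothetical call site, with the surrounding function and names invented for the example:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Clear the bodies of blocks known to be dead. There is no separate debug
// intrinsic tally any more, so the unsigned return feeds the statistic
// directly and also tells us whether anything changed.
static bool clearDeadBlockBodies(ArrayRef<BasicBlock *> DeadBlocks,
                                 unsigned &NumDeadInst) {
  bool Changed = false;
  for (BasicBlock *BB : DeadBlocks) {
    unsigned Removed = removeAllNonTerminatorAndEHPadInstructions(BB);
    NumDeadInst += Removed;
    Changed |= Removed != 0;
  }
  return Changed;
}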
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp index afa7abfea419e..a22d84dcf014d 100644 --- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -551,7 +551,7 @@ static std::optional analyzeLoopUnrollCost( for (Instruction &I : *BB) { // These won't get into the final code - don't even try calculating the // cost for them. - if (isa(I) || EphValues.count(&I)) + if (EphValues.count(&I)) continue; // Track this instruction's expected baseline cost when executing the diff --git a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp index cb202f5f71b91..f053e202655be 100644 --- a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp +++ b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp @@ -296,10 +296,6 @@ bool SpeculativeExecutionPass::considerHoistingFromTo( }; auto AllPrecedingUsesFromBlockHoisted = [&HasNoUnhoistedInstr](const User *U) { - // Do not hoist any debug info intrinsics. - if (isa(U)) - return false; - return HasNoUnhoistedInstr(U->operand_values()); }; @@ -313,9 +309,7 @@ bool SpeculativeExecutionPass::considerHoistingFromTo( if (TotalSpeculationCost > SpecExecMaxSpeculationCost) return false; // too much to hoist } else { - // Debug info intrinsics should not be counted for threshold. - if (!isa(I)) - NotHoistedInstCount++; + NotHoistedInstCount++; if (NotHoistedInstCount > SpecExecMaxNotHoisted) return false; // too much left behind NotHoisted.insert(&I); diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp index c71c5a70a12fd..e7d989a43840d 100644 --- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -239,8 +239,7 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) { // A PseudoProbeInst has the IntrInaccessibleMemOnly tag hence it is // considered accessing memory and will be marked as a tail call if we // don't bail out here. - if (!CI || CI->isTailCall() || isa(&I) || - isa(&I)) + if (!CI || CI->isTailCall() || isa(&I)) continue; // Bail out for intrinsic stackrestore call because it can modify @@ -335,9 +334,6 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) { /// instructions between the call and this instruction are movable. /// static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) { - if (isa(I)) - return true; - if (const IntrinsicInst *II = dyn_cast(I)) if (II->getIntrinsicID() == Intrinsic::lifetime_end && llvm::findAllocaForValue(II->getArgOperand(1))) @@ -396,12 +392,6 @@ static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) { return true; } -static Instruction *firstNonDbg(BasicBlock::iterator I) { - while (isa(I)) - ++I; - return &*I; -} - namespace { class TailRecursionEliminator { Function &F; @@ -493,9 +483,8 @@ CallInst *TailRecursionEliminator::findTRECandidate(BasicBlock *BB) { // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call // and disable this xform in this case, because the code generator will // lower the call to fabs into inline code. 
- if (BB == &F.getEntryBlock() && - firstNonDbg(BB->front().getIterator()) == CI && - firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() && + if (BB == &F.getEntryBlock() && &BB->front() == CI && + &*std::next(BB->begin()) == TI && CI->getCalledFunction() && !TTI->isLoweredToCall(CI->getCalledFunction())) { // A single-block function with just a call and a return. Check that // the arguments match. diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp index 1210bdf4a1c98..9883974c55e3b 100644 --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -471,10 +471,6 @@ CodeExtractor::getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC, Info.LifeEnd = IntrInst; continue; } - // At this point, permit debug uses outside of the region. - // This is fixed in a later call to fixupDebugInfoPostExtraction(). - if (isa(IntrInst)) - continue; } // Find untracked uses of the address, bail. if (!definedInRegion(Blocks, U)) @@ -1077,10 +1073,6 @@ static void applyFirstDebugLoc(Function *oldFunction, return any_of(*BB, [&BranchI](const Instruction &I) { if (!I.getDebugLoc()) return false; - // Don't use source locations attached to debug-intrinsics: they could - // be from completely unrelated scopes. - if (isa(I)) - return false; BranchI->setDebugLoc(I.getDebugLoc()); return true; }); @@ -1329,7 +1321,6 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc, // 2) They need to point to fresh metadata, e.g. because they currently // point to a variable in the wrong scope. SmallDenseMap RemappedMetadata; - SmallVector DebugIntrinsicsToDelete; SmallVector DVRsToDelete; DenseMap Cache; @@ -1370,55 +1361,29 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc, } DbgVariableRecord &DVR = cast(DR); - // Apply the two updates that dbg.values get: invalid operands, and - // variable metadata fixup. + // If any of the used locations are invalid, delete the record. if (any_of(DVR.location_ops(), IsInvalidLocation)) { DVRsToDelete.push_back(&DVR); continue; } + + // DbgAssign intrinsics have an extra Value argument: if (DVR.isDbgAssign() && IsInvalidLocation(DVR.getAddress())) { DVRsToDelete.push_back(&DVR); continue; } + + // If the variable was in the scope of the old function, i.e. it was not + // inlined, point the intrinsic to a fresh variable within the new + // function. if (!DVR.getDebugLoc().getInlinedAt()) DVR.setVariable(GetUpdatedDIVariable(DVR.getVariable())); } }; - for (Instruction &I : instructions(NewFunc)) { + for (Instruction &I : instructions(NewFunc)) UpdateDbgRecordsOnInst(I); - auto *DII = dyn_cast(&I); - if (!DII) - continue; - - // Point the intrinsic to a fresh label within the new function if the - // intrinsic was not inlined from some other function. - if (auto *DLI = dyn_cast(&I)) { - UpdateDbgLabel(DLI); - continue; - } - - auto *DVI = cast(DII); - // If any of the used locations are invalid, delete the intrinsic. - if (any_of(DVI->location_ops(), IsInvalidLocation)) { - DebugIntrinsicsToDelete.push_back(DVI); - continue; - } - // DbgAssign intrinsics have an extra Value argument: - if (auto *DAI = dyn_cast(DVI); - DAI && IsInvalidLocation(DAI->getAddress())) { - DebugIntrinsicsToDelete.push_back(DVI); - continue; - } - // If the variable was in the scope of the old function, i.e. it was not - // inlined, point the intrinsic to a fresh variable within the new function. 
- if (!DVI->getDebugLoc().getInlinedAt()) - DVI->setVariable(GetUpdatedDIVariable(DVI->getVariable())); - } - - for (auto *DII : DebugIntrinsicsToDelete) - DII->eraseFromParent(); for (auto *DVR : DVRsToDelete) DVR->getMarker()->MarkedInstr->dropOneDbgRecord(DVR); DIB.finalizeSubprogram(NewSP); diff --git a/llvm/lib/Transforms/Utils/Debugify.cpp b/llvm/lib/Transforms/Utils/Debugify.cpp index 729813a92f516..7daeec1b591ff 100644 --- a/llvm/lib/Transforms/Utils/Debugify.cpp +++ b/llvm/lib/Transforms/Utils/Debugify.cpp @@ -353,7 +353,7 @@ bool llvm::collectDebugInfoMetadata(Module &M, // Cllect dbg.values and dbg.declare. if (DebugifyLevel > Level::Locations) { - auto HandleDbgVariable = [&](auto *DbgVar) { + auto HandleDbgVariable = [&](DbgVariableRecord *DbgVar) { if (!SP) return; // Skip inlined variables. @@ -368,14 +368,8 @@ bool llvm::collectDebugInfoMetadata(Module &M, }; for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) HandleDbgVariable(&DVR); - if (auto *DVI = dyn_cast(&I)) - HandleDbgVariable(DVI); } - // Skip debug instructions other than dbg.value and dbg.declare. - if (isa(&I)) - continue; - LLVM_DEBUG(dbgs() << " Collecting info for inst: " << I << '\n'); DebugInfoBeforePass.InstToDelete.insert({&I, &I}); @@ -597,7 +591,7 @@ bool llvm::checkDebugInfoMetadata(Module &M, // Collect dbg.values and dbg.declares. if (DebugifyLevel > Level::Locations) { - auto HandleDbgVariable = [&](auto *DbgVar) { + auto HandleDbgVariable = [&](DbgVariableRecord *DbgVar) { if (!SP) return; // Skip inlined variables. @@ -612,14 +606,8 @@ bool llvm::checkDebugInfoMetadata(Module &M, }; for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) HandleDbgVariable(&DVR); - if (auto *DVI = dyn_cast(&I)) - HandleDbgVariable(DVI); } - // Skip debug instructions other than dbg.value and dbg.declare. - if (isa(&I)) - continue; - LLVM_DEBUG(dbgs() << " Collecting info for inst: " << I << '\n'); DebugInfoAfterPass.DILocations.insert({&I, hasLoc(I)}); diff --git a/llvm/lib/Transforms/Utils/Evaluator.cpp b/llvm/lib/Transforms/Utils/Evaluator.cpp index d1db2ee29f3a2..3a5c7a3b1738e 100644 --- a/llvm/lib/Transforms/Utils/Evaluator.cpp +++ b/llvm/lib/Transforms/Utils/Evaluator.cpp @@ -353,13 +353,6 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB, } else if (isa(CurInst) || isa(CurInst)) { CallBase &CB = *cast(&*CurInst); - // Debug info can safely be ignored here. - if (isa(CB)) { - LLVM_DEBUG(dbgs() << "Ignoring debug info.\n"); - ++CurInst; - continue; - } - // Cannot handle inline asm. if (CB.isInlineAsm()) { LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp index f47c467d15140..7df5e9958182c 100644 --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -1927,16 +1927,11 @@ static void fixupLineNumbers(Function *Fn, Function::iterator FI, } } - // Remove debug info intrinsics if we're not keeping inline info. + // Remove debug info records if we're not keeping inline info. 
if (NoInlineLineTables) { BasicBlock::iterator BI = FI->begin(); while (BI != FI->end()) { - if (isa(BI)) { - BI = BI->eraseFromParent(); - continue; - } else { - BI->dropDbgRecords(); - } + BI->dropDbgRecords(); ++BI; } } diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index a3252a69874d3..03644b1a41889 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -2849,10 +2849,8 @@ bool llvm::handleUnreachableTerminator( return Changed; } -std::pair -llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { +unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { unsigned NumDeadInst = 0; - unsigned NumDeadDbgInst = 0; // Delete the instructions backwards, as it has a reduced likelihood of // having to update as many def-use and use-def chains. Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. @@ -2871,15 +2869,12 @@ llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { EndInst = Inst; continue; } - if (isa(Inst)) - ++NumDeadDbgInst; - else - ++NumDeadInst; + ++NumDeadInst; // RemoveDIs: erasing debug-info must be done manually. Inst->dropDbgRecords(); Inst->eraseFromParent(); } - return {NumDeadInst, NumDeadDbgInst}; + return NumDeadInst; } unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA, diff --git a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp index 6b42503b2e015..66d0573e83f65 100644 --- a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp @@ -547,36 +547,22 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // possible or create a clone in the OldPreHeader if not. Instruction *LoopEntryBranch = OrigPreheader->getTerminator(); - // Record all debug intrinsics preceding LoopEntryBranch to avoid + // Record all debug records preceding LoopEntryBranch to avoid // duplication. - using DbgIntrinsicHash = + using DbgHash = std::pair, DIExpression *>; - auto makeHash = [](auto *D) -> DbgIntrinsicHash { + auto makeHash = [](const DbgVariableRecord *D) -> DbgHash { auto VarLocOps = D->location_ops(); return {{hash_combine_range(VarLocOps), D->getVariable()}, D->getExpression()}; }; - SmallDenseSet DbgIntrinsics; - for (Instruction &I : llvm::drop_begin(llvm::reverse(*OrigPreheader))) { - if (auto *DII = dyn_cast(&I)) { - DbgIntrinsics.insert(makeHash(DII)); - // Until RemoveDIs supports dbg.declares in DbgVariableRecord format, - // we'll need to collect DbgVariableRecords attached to any other debug - // intrinsics. - for (const DbgVariableRecord &DVR : - filterDbgVars(DII->getDbgRecordRange())) - DbgIntrinsics.insert(makeHash(&DVR)); - } else { - break; - } - } - + SmallDenseSet DbgRecords; // Build DbgVariableRecord hashes for DbgVariableRecords attached to the - // terminator, which isn't considered in the loop above. + // terminator. for (const DbgVariableRecord &DVR : filterDbgVars(OrigPreheader->getTerminator()->getDbgRecordRange())) - DbgIntrinsics.insert(makeHash(&DVR)); + DbgRecords.insert(makeHash(&DVR)); // Remember the local noalias scope declarations in the header. After the // rotation, they must be duplicated and the scope must be cloned. This @@ -623,7 +609,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // memory (without proving that the loop doesn't write). 
if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() && !Inst->mayWriteToMemory() && !Inst->isTerminator() && - !isa(Inst) && !isa(Inst) && + !isa(Inst) && // It is not safe to hoist the value of these instructions in // coroutines, as the addresses of otherwise eligible variables (e.g. // thread-local variables and errno) may change if the coroutine is @@ -642,7 +628,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // Erase anything we've seen before. for (DbgVariableRecord &DVR : make_early_inc_range(filterDbgVars(DbgValueRange))) - if (DbgIntrinsics.count(makeHash(&DVR))) + if (DbgRecords.count(makeHash(&DVR))) DVR.eraseFromParent(); } @@ -671,7 +657,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // Erase anything we've seen before. for (DbgVariableRecord &DVR : make_early_inc_range(filterDbgVars(Range))) - if (DbgIntrinsics.count(makeHash(&DVR))) + if (DbgRecords.count(makeHash(&DVR))) DVR.eraseFromParent(); } @@ -679,13 +665,6 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { RemapInstruction(C, ValueMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); - // Avoid inserting the same intrinsic twice. - if (auto *DII = dyn_cast(C)) - if (DbgIntrinsics.count(makeHash(DII))) { - C->eraseFromParent(); - continue; - } - // With the operands remapped, see if the instruction constant folds or is // otherwise simplifyable. This commonly occurs because the entry from PHI // nodes allows icmps and other instructions to fold. @@ -806,7 +785,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap, SE, &InsertedPHIs); - // Attach dbg.value intrinsics to the new phis if that phi uses a value that + // Attach debug records to the new phis if that phi uses a value that // previously had debug metadata attached. This keeps the debug info // up-to-date in the loop body. if (!InsertedPHIs.empty()) @@ -952,9 +931,6 @@ static bool shouldSpeculateInstrs(BasicBlock::iterator Begin, if (!isSafeToSpeculativelyExecute(&*I)) return false; - if (isa(I)) - continue; - switch (I->getOpcode()) { default: return false; diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp index 70afd4133df7c..24fe08d6c3e4e 100644 --- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp +++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp @@ -182,8 +182,7 @@ SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const { BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin(); while ((isa(IP) && isa(cast(IP)->getOperand(0)) && - cast(IP)->getOperand(0) != A) || - isa(IP)) + cast(IP)->getOperand(0) != A)) ++IP; return IP; } @@ -278,11 +277,6 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, if (IP != BlockBegin) { --IP; for (; ScanLimit; --IP, --ScanLimit) { - // Don't count dbg.value against the ScanLimit, to avoid perturbing the - // generated code. - if (isa(IP)) - ScanLimit++; - auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) { // Ensure that no-wrap flags match. if (isa(I)) { @@ -382,10 +376,6 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Value *V, if (IP != BlockBegin) { --IP; for (; ScanLimit; --IP, --ScanLimit) { - // Don't count dbg.value against the ScanLimit, to avoid perturbing the - // generated code. 
- if (isa(IP)) - ScanLimit++; if (auto *GEP = dyn_cast(IP)) { if (GEP->getPointerOperand() == V && GEP->getSourceElementType() == Builder.getInt8Ty() && @@ -1545,8 +1535,7 @@ Value *SCEVExpander::expand(const SCEV *S) { InsertPt = L->getHeader()->getFirstInsertionPt(); while (InsertPt != Builder.GetInsertPoint() && - (isInsertedInstruction(&*InsertPt) || - isa(&*InsertPt))) { + (isInsertedInstruction(&*InsertPt))) { InsertPt = std::next(InsertPt); } break; diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index f67a6414ca316..8b5813b5a7828 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -1130,17 +1130,14 @@ static void cloneInstructionsIntoPredecessorBlockAndUpdateSSAUses( Instruction *NewBonusInst = BonusInst.clone(); - if (!isa(BonusInst)) { - if (!NewBonusInst->getDebugLoc().isSameSourceLocation( - PTI->getDebugLoc())) { - // Unless the instruction has the same !dbg location as the original - // branch, drop it. When we fold the bonus instructions we want to make - // sure we reset their debug locations in order to avoid stepping on - // dead code caused by folding dead branches. - NewBonusInst->setDebugLoc(DebugLoc::getDropped()); - } else if (const DebugLoc &DL = NewBonusInst->getDebugLoc()) { - mapAtomInstance(DL, VMap); - } + if (!NewBonusInst->getDebugLoc().isSameSourceLocation(PTI->getDebugLoc())) { + // Unless the instruction has the same !dbg location as the original + // branch, drop it. When we fold the bonus instructions we want to make + // sure we reset their debug locations in order to avoid stepping on + // dead code caused by folding dead branches. + NewBonusInst->setDebugLoc(DebugLoc::getDropped()); + } else if (const DebugLoc &DL = NewBonusInst->getDebugLoc()) { + mapAtomInstance(DL, VMap); } RemapInstruction(NewBonusInst, VMap, @@ -1158,9 +1155,6 @@ static void cloneInstructionsIntoPredecessorBlockAndUpdateSSAUses( RemapDbgRecordRange(NewBonusInst->getModule(), Range, VMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); - if (isa(BonusInst)) - continue; - NewBonusInst->takeName(&BonusInst); BonusInst.setName(NewBonusInst->getName() + ".old"); VMap[&BonusInst] = NewBonusInst; @@ -1903,21 +1897,6 @@ bool SimplifyCFGOpt::hoistCommonCodeFromSuccessors(Instruction *TI, Instruction *I1 = &*BB1ItrPair.first; - // Skip debug info if it is not identical. - bool AllDbgInstsAreIdentical = all_of(OtherSuccIterRange, [I1](auto &Iter) { - Instruction *I2 = &*Iter; - return I1->isIdenticalToWhenDefined(I2); - }); - if (!AllDbgInstsAreIdentical) { - while (isa(I1)) - I1 = &*++BB1ItrPair.first; - for (auto &SuccIter : OtherSuccIterRange) { - Instruction *I2 = &*SuccIter; - while (isa(I2)) - I2 = &*++SuccIter; - } - } - bool AllInstsAreIdentical = true; bool HasTerminator = I1->isTerminator(); for (auto &SuccIter : OtherSuccIterRange) { @@ -1965,49 +1944,33 @@ bool SimplifyCFGOpt::hoistCommonCodeFromSuccessors(Instruction *TI, if (AllInstsAreIdentical) { BB1ItrPair.first++; - if (isa(I1)) { - // The debug location is an integral part of a debug info intrinsic - // and can't be separated from it or replaced. Instead of attempting - // to merge locations, simply hoist both copies of the intrinsic. - hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherInsts); - // We've just hoisted DbgVariableRecords; move I1 after them (before TI) - // and leave any that were not hoisted behind (by calling moveBefore - // rather than moveBeforePreserving). 
- I1->moveBefore(TI->getIterator()); - for (auto &SuccIter : OtherSuccIterRange) { - auto *I2 = &*SuccIter++; - assert(isa(I2)); - I2->moveBefore(TI->getIterator()); + // For a normal instruction, we just move one to right before the + // branch, then replace all uses of the other with the first. Finally, + // we remove the now redundant second instruction. + hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherInsts); + // We've just hoisted DbgVariableRecords; move I1 after them (before TI) + // and leave any that were not hoisted behind (by calling moveBefore + // rather than moveBeforePreserving). + I1->moveBefore(TI->getIterator()); + for (auto &SuccIter : OtherSuccIterRange) { + Instruction *I2 = &*SuccIter++; + assert(I2 != I1); + if (!I2->use_empty()) + I2->replaceAllUsesWith(I1); + I1->andIRFlags(I2); + if (auto *CB = dyn_cast(I1)) { + bool Success = CB->tryIntersectAttributes(cast(I2)); + assert(Success && "We should not be trying to hoist callbases " + "with non-intersectable attributes"); + // For NDEBUG Compile. + (void)Success; } - } else { - // For a normal instruction, we just move one to right before the - // branch, then replace all uses of the other with the first. Finally, - // we remove the now redundant second instruction. - hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherInsts); - // We've just hoisted DbgVariableRecords; move I1 after them (before TI) - // and leave any that were not hoisted behind (by calling moveBefore - // rather than moveBeforePreserving). - I1->moveBefore(TI->getIterator()); - for (auto &SuccIter : OtherSuccIterRange) { - Instruction *I2 = &*SuccIter++; - assert(I2 != I1); - if (!I2->use_empty()) - I2->replaceAllUsesWith(I1); - I1->andIRFlags(I2); - if (auto *CB = dyn_cast(I1)) { - bool Success = CB->tryIntersectAttributes(cast(I2)); - assert(Success && "We should not be trying to hoist callbases " - "with non-intersectable attributes"); - // For NDEBUG Compile. - (void)Success; - } - combineMetadataForCSE(I1, I2, true); - // I1 and I2 are being combined into a single instruction. Its debug - // location is the merged locations of the original instructions. - I1->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc()); - I2->eraseFromParent(); - } + combineMetadataForCSE(I1, I2, true); + // I1 and I2 are being combined into a single instruction. Its debug + // location is the merged locations of the original instructions. + I1->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc()); + I2->eraseFromParent(); } if (!Changed) NumHoistCommonCode += SuccIterPairs.size(); @@ -2297,11 +2260,8 @@ static void sinkLastInstruction(ArrayRef Blocks) { SmallVector Insts; for (auto *BB : Blocks) { Instruction *I = BB->getTerminator(); - do { - I = I->getPrevNode(); - } while (isa(I) && I != &BB->front()); - if (!isa(I)) - Insts.push_back(I); + I = I->getPrevNode(); + Insts.push_back(I); } // We don't need to do any more checking here; canSinkInstructions should @@ -3234,7 +3194,7 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, // - All of their uses are in ThenBB. SmallDenseMap SinkCandidateUseCounts; - SmallVector SpeculatedDbgIntrinsics; + SmallVector SpeculatedPseudoProbes; unsigned SpeculatedInstructions = 0; bool HoistLoadsStores = Options.HoistLoadsStoresWithCondFaulting; @@ -3243,12 +3203,6 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, StoreInst *SpeculatedStore = nullptr; EphemeralValueTracker EphTracker; for (Instruction &I : reverse(drop_end(*ThenBB))) { - // Skip debug info. 
- if (isa(I)) { - SpeculatedDbgIntrinsics.push_back(&I); - continue; - } - // Skip pseudo probes. The consequence is we lose track of the branch // probability for ThenBB, which is fine since the optimization here takes // place regardless of the branch probability. @@ -3257,7 +3211,7 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, // the samples collected on the non-conditional path are counted towards // the conditional path. We leave it for the counts inference algorithm to // figure out a proper count for an unknown probe. - SpeculatedDbgIntrinsics.push_back(&I); + SpeculatedPseudoProbes.push_back(&I); continue; } @@ -3388,9 +3342,7 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, // hoisting above. for (auto &I : make_early_inc_range(*ThenBB)) { if (!SpeculatedStoreValue || &I != SpeculatedStore) { - // Don't update the DILocation of dbg.assign intrinsics. - if (!isa(&I)) - I.setDebugLoc(DebugLoc::getDropped()); + I.setDebugLoc(DebugLoc::getDropped()); } I.dropUBImplyingAttrsAndMetadata(); @@ -3402,9 +3354,7 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, } // Hoist the instructions. - // In "RemoveDIs" non-instr debug-info mode, drop DbgVariableRecords attached - // to these instructions, in the same way that dbg.value intrinsics are - // dropped at the end of this block. + // Drop DbgVariableRecords attached to these instructions. for (auto &It : *ThenBB) for (DbgRecord &DR : make_early_inc_range(It.getDbgRecordRange())) // Drop all records except assign-kind DbgVariableRecords (dbg.assign @@ -3442,15 +3392,9 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, PN.setIncomingValue(ThenI, V); } - // Remove speculated dbg intrinsics. - // FIXME: Is it possible to do this in a more elegant way? Moving/merging the - // dbg value for the different flows and inserting it after the select. - for (Instruction *I : SpeculatedDbgIntrinsics) { - // We still want to know that an assignment took place so don't remove - // dbg.assign intrinsics. - if (!isa(I)) - I->eraseFromParent(); - } + // Remove speculated pseudo probes. + for (Instruction *I : SpeculatedPseudoProbes) + I->eraseFromParent(); ++NumSpeculations; return true; @@ -4162,8 +4106,8 @@ bool llvm::foldBranchToCommonDest(BranchInst *BI, DomTreeUpdater *DTU, // Don't check the branch condition comparison itself. if (&I == Cond) continue; - // Ignore dbg intrinsics, and the terminator. - if (isa(I) || isa(I)) + // Ignore the terminator. + if (isa(I)) continue; // I must be safe to execute unconditionally. if (!isSafeToSpeculativelyExecute(&I)) @@ -7762,8 +7706,7 @@ static bool tryToMergeLandingPad(LandingPadInst *LPad, BranchInst *BI, LandingPadInst *LPad2 = dyn_cast(I); if (!LPad2 || !LPad2->isIdenticalTo(LPad)) continue; - for (++I; isa(I); ++I) - ; + ++I; BranchInst *BI2 = dyn_cast(I); if (!BI2 || !BI2->isIdenticalTo(BI)) continue; @@ -7784,12 +7727,6 @@ static bool tryToMergeLandingPad(LandingPadInst *LPad, BranchInst *BI, } } - // The debug info in OtherPred doesn't cover the merged control flow that - // used to go through BB. We need to delete it or update it. - for (Instruction &Inst : llvm::make_early_inc_range(*OtherPred)) - if (isa(Inst)) - Inst.eraseFromParent(); - SmallSetVector UniqueSuccs(succ_begin(BB), succ_end(BB)); for (BasicBlock *Succ : UniqueSuccs) { Succ->removePredecessor(BB); @@ -7837,8 +7774,7 @@ bool SimplifyCFGOpt::simplifyUncondBranch(BranchInst *BI, // constant, try to simplify the block. 
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) if (ICI->isEquality() && isa<ConstantInt>(ICI->getOperand(1))) { - for (++I; isa<DbgInfoIntrinsic>(I); ++I) - ; + ++I; if (I->isTerminator() && tryToSimplifyUncondBranchWithICmpInIt(ICI, Builder)) return true; } // See if we can merge an empty landing pad block with another which is // equivalent. if (LandingPadInst *LPad = dyn_cast<LandingPadInst>(I)) { - for (++I; isa<DbgInfoIntrinsic>(I); ++I) - ; + ++I; if (I->isTerminator() && tryToMergeLandingPad(LPad, BI, BB, DTU)) return true; } diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index 8e09e6f8d4935..0c4e5bb3d4721 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -896,13 +896,11 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { } // end of PHI handling // We handle calls that: - // * Are debug info intrinsics. // * Have a mapping to an IR intrinsic. // * Have a vector version available. auto *CI = dyn_cast<CallInst>(&I); if (CI && !getVectorIntrinsicIDForCall(CI, TLI) && - !isa<DbgInfoIntrinsic>(CI) && !(CI->getCalledFunction() && TLI && (!VFDatabase::getMappings(*CI).empty() || isTLIScalarize(*TLI, *CI)))) { diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index c3ca22dce0cc4..eedd17cccb1c3 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -24297,9 +24297,6 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { continue; } - if (isa<DbgInfoIntrinsic>(It)) - continue; - // Try to vectorize reductions that use PHINodes. if (PHINode *P = dyn_cast<PHINode>(It)) { // Check that the PHI is a reduction PHI.
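Closing sketch (illustrative, not part of the patch): places that used to erase debug intrinsics, such as the inliner's NoInlineLineTables path above, now drop the DbgRecords attached to the instructions that carry them. A hypothetical helper showing that pattern, built only from the getDbgRecordRange and dropDbgRecords calls that already appear in this diff:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Strip every attached DbgRecord (dbg_value, dbg_declare, dbg_assign and
// dbg_label records) from a function. No instructions are erased, because
// debug info no longer occupies instruction positions.
static bool dropAllDebugRecords(Function &F) {
  bool Changed = false;
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (!I.getDbgRecordRange().empty()) {
        I.dropDbgRecords();
        Changed = true;
      }
  return Changed;
}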