-
Notifications
You must be signed in to change notification settings - Fork 15.2k
[SLP]Fix graph traversal in getSpillCost #124984
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 2 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1395,7 +1395,7 @@ class BoUpSLP { | |
|
|
||
| /// \returns the cost incurred by unwanted spills and fills, caused by | ||
| /// holding live values over call sites. | ||
| InstructionCost getSpillCost() const; | ||
| InstructionCost getSpillCost(); | ||
|
|
||
| /// \returns the vectorization cost of the subtree that starts at \p VL. | ||
| /// A negative number means that this is profitable. | ||
|
|
@@ -2958,7 +2958,7 @@ class BoUpSLP { | |
| } | ||
|
|
||
| /// Check if the value is vectorized in the tree. | ||
| bool isVectorized(Value *V) const { | ||
| bool isVectorized(const Value *V) const { | ||
| assert(V && "V cannot be nullptr."); | ||
| return ScalarToTreeEntries.contains(V); | ||
| } | ||
|
|
@@ -3726,7 +3726,7 @@ class BoUpSLP { | |
| #endif | ||
|
|
||
| /// Get list of vector entries, associated with the value \p V. | ||
| ArrayRef<TreeEntry *> getTreeEntries(Value *V) const { | ||
| ArrayRef<TreeEntry *> getTreeEntries(const Value *V) const { | ||
| assert(V && "V cannot be nullptr."); | ||
| auto It = ScalarToTreeEntries.find(V); | ||
| if (It == ScalarToTreeEntries.end()) | ||
|
|
@@ -12160,78 +12160,80 @@ bool BoUpSLP::isTreeNotExtendable() const { | |
| return Res; | ||
| } | ||
|
|
||
| InstructionCost BoUpSLP::getSpillCost() const { | ||
| InstructionCost BoUpSLP::getSpillCost() { | ||
| // Walk from the bottom of the tree to the top, tracking which values are | ||
| // live. When we see a call instruction that is not part of our tree, | ||
| // query TTI to see if there is a cost to keeping values live over it | ||
| // (for example, if spills and fills are required). | ||
| unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); | ||
| InstructionCost Cost = 0; | ||
|
|
||
| SmallPtrSet<Instruction *, 4> LiveValues; | ||
| Instruction *PrevInst = nullptr; | ||
| SmallPtrSet<const TreeEntry *, 4> LiveEntries; | ||
| const TreeEntry *Prev = nullptr; | ||
|
|
||
| // The entries in VectorizableTree are not necessarily ordered by their | ||
| // position in basic blocks. Collect them and order them by dominance so later | ||
| // instructions are guaranteed to be visited first. For instructions in | ||
| // different basic blocks, we only scan to the beginning of the block, so | ||
| // their order does not matter, as long as all instructions in a basic block | ||
| // are grouped together. Using dominance ensures a deterministic order. | ||
| SmallVector<Instruction *, 16> OrderedScalars; | ||
| SmallVector<TreeEntry *, 16> OrderedEntries; | ||
| for (const auto &TEPtr : VectorizableTree) { | ||
| if (TEPtr->State != TreeEntry::Vectorize) | ||
| if (TEPtr->isGather()) | ||
| continue; | ||
| Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); | ||
| if (!Inst) | ||
| continue; | ||
| OrderedScalars.push_back(Inst); | ||
| } | ||
| llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { | ||
| auto *NodeA = DT->getNode(A->getParent()); | ||
| auto *NodeB = DT->getNode(B->getParent()); | ||
| OrderedEntries.push_back(TEPtr.get()); | ||
| } | ||
| llvm::stable_sort(OrderedEntries, [&](const TreeEntry *TA, | ||
| const TreeEntry *TB) { | ||
| Instruction &A = getLastInstructionInBundle(TA); | ||
| Instruction &B = getLastInstructionInBundle(TB); | ||
| auto *NodeA = DT->getNode(A.getParent()); | ||
| auto *NodeB = DT->getNode(B.getParent()); | ||
| assert(NodeA && "Should only process reachable instructions"); | ||
| assert(NodeB && "Should only process reachable instructions"); | ||
| assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && | ||
| "Different nodes should have different DFS numbers"); | ||
| if (NodeA != NodeB) | ||
| return NodeA->getDFSNumIn() > NodeB->getDFSNumIn(); | ||
| return B->comesBefore(A); | ||
| return B.comesBefore(&A); | ||
| }); | ||
|
|
||
| for (Instruction *Inst : OrderedScalars) { | ||
| if (!PrevInst) { | ||
| PrevInst = Inst; | ||
| for (const TreeEntry *TE : OrderedEntries) { | ||
| if (!Prev) { | ||
| Prev = TE; | ||
| continue; | ||
| } | ||
|
|
||
| // Update LiveValues. | ||
| LiveValues.erase(PrevInst); | ||
| for (auto &J : PrevInst->operands()) { | ||
| if (isa<Instruction>(&*J) && isVectorized(&*J)) | ||
| LiveValues.insert(cast<Instruction>(&*J)); | ||
| LiveEntries.erase(Prev); | ||
| for (unsigned I : seq<unsigned>(Prev->getNumOperands())) { | ||
| const TreeEntry *Op = getVectorizedOperand(Prev, I); | ||
| if (!Op) | ||
| continue; | ||
| assert(!Op->isGather() && "Expected vectorized operand."); | ||
| LiveEntries.insert(Op); | ||
| } | ||
|
|
||
| LLVM_DEBUG({ | ||
| dbgs() << "SLP: #LV: " << LiveValues.size(); | ||
| for (auto *X : LiveValues) | ||
| dbgs() << " " << X->getName(); | ||
| dbgs() << "SLP: #LV: " << LiveEntries.size(); | ||
| for (auto *X : LiveEntries) | ||
| X->dump(); | ||
| dbgs() << ", Looking at "; | ||
| Inst->dump(); | ||
| TE->dump(); | ||
| }); | ||
|
|
||
| // Now find the sequence of instructions between PrevInst and Inst. | ||
| unsigned NumCalls = 0; | ||
| BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), | ||
| PrevInstIt = | ||
| PrevInst->getIterator().getReverse(); | ||
| const Instruction *PrevInst = &getLastInstructionInBundle(Prev); | ||
| BasicBlock::const_reverse_iterator | ||
| InstIt = ++getLastInstructionInBundle(TE).getIterator().getReverse(), | ||
| PrevInstIt = PrevInst->getIterator().getReverse(); | ||
| while (InstIt != PrevInstIt) { | ||
| if (PrevInstIt == PrevInst->getParent()->rend()) { | ||
| PrevInstIt = Inst->getParent()->rbegin(); | ||
| PrevInstIt = getLastInstructionInBundle(TE).getParent()->rbegin(); | ||
| continue; | ||
| } | ||
|
|
||
| auto NoCallIntrinsic = [this](Instruction *I) { | ||
| auto *II = dyn_cast<IntrinsicInst>(I); | ||
| auto NoCallIntrinsic = [this](const Instruction *I) { | ||
| const auto *II = dyn_cast<IntrinsicInst>(I); | ||
| if (!II) | ||
| return false; | ||
| if (II->isAssumeLikeIntrinsic()) | ||
|
|
@@ -12252,25 +12254,47 @@ InstructionCost BoUpSLP::getSpillCost() const { | |
| }; | ||
|
|
||
| // Debug information does not impact spill cost. | ||
| if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) && | ||
| &*PrevInstIt != PrevInst) | ||
| // Vectorized calls, represented as vector intrinsics, do not impact spill | ||
| // cost. | ||
| if (const auto *CB = dyn_cast<CallBase>(&*PrevInstIt); | ||
| CB && !NoCallIntrinsic(CB) && | ||
| (!isVectorized(CB) || | ||
| any_of(getTreeEntries(CB), [&](const TreeEntry *TE) { | ||
| auto *CI = cast<CallInst>(TE->getMainOp()); | ||
| Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | ||
|
|
||
| unsigned MinBW = MinBWs.lookup(TE).first; | ||
| SmallVector<Type *> ArgTys = | ||
| buildIntrinsicArgTypes(CI, ID, TE->Scalars.size(), MinBW, TTI); | ||
| Type *ScalarTy = CI->getType(); | ||
| if (MinBW) | ||
| ScalarTy = IntegerType::get(CI->getContext(), MinBW); | ||
| FixedVectorType *VecTy = | ||
| getWidenedType(ScalarTy, TE->Scalars.size()); | ||
| auto VecCallCosts = | ||
| getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys); | ||
| bool UseIntrinsic = ID != Intrinsic::not_intrinsic && | ||
| VecCallCosts.first <= VecCallCosts.second; | ||
| return !UseIntrinsic; | ||
| }))) | ||
| NumCalls++; | ||
|
|
||
| ++PrevInstIt; | ||
| } | ||
|
|
||
| if (NumCalls) { | ||
| SmallVector<Type *, 4> V; | ||
| for (auto *II : LiveValues) { | ||
| auto *ScalarTy = II->getType(); | ||
| if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) | ||
| ScalarTy = VectorTy->getElementType(); | ||
| V.push_back(getWidenedType(ScalarTy, BundleWidth)); | ||
| SmallVector<Type *, 4> EntriesTypes; | ||
| for (const TreeEntry *TE : LiveEntries) { | ||
| auto *ScalarTy = TE->getMainOp()->getType(); | ||
| auto It = MinBWs.find(TE); | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. You dropped the if (auto *VectorTy = dyn_cast&lt;FixedVectorType&gt;(ScalarTy)) check, which I think was added for revectorization. Is that needed, or does the MainOp->getType() call normalize?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It was a bug; we need to estimate the whole vector here. For revec, if the ScalarTy is 4x and there are 2 elements, it estimates the spill cost for a 2x vector, but instead it should estimate it for an 8x vector. |
||
| if (It != MinBWs.end()) | ||
| ScalarTy = IntegerType::get(ScalarTy->getContext(), It->second.first); | ||
| EntriesTypes.push_back(getWidenedType(ScalarTy, TE->getVectorFactor())); | ||
| } | ||
| Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); | ||
| Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(EntriesTypes); | ||
| } | ||
|
|
||
| PrevInst = Inst; | ||
| Prev = TE; | ||
| } | ||
|
|
||
| return Cost; | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can you move this bit to a new review? I agree we should explore in this direction, but this needs to happen once per tree entry, not once per instruction corresponding to that tree entry. I'd also like to get your previous change — with just the isVectorized change — landed, so that we can iterate on top of that.