Commit 438c303

Author: Mikhail Gudim (committed)
[SLPVectorizer] Move size checks (NFC).
Add the `analyzeRtStrideCandidate` function. In future commits we are going to add to it the capability to widen strided loads. So, in this commit, we move the size / type checks into it, since widening can change the size / type of the load.
1 parent 2512611 commit 438c303

File tree: 1 file changed (+29 −9 lines)


llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Lines changed: 29 additions & 9 deletions
@@ -2245,6 +2245,11 @@ class BoUpSLP {
                      Align Alignment, const int64_t Diff, Value *Ptr0,
                      Value *PtrN, StridedPtrInfo &SPtrInfo) const;
 
+  bool analyzeRtStrideCandidate(ArrayRef<Value *> PointerOps, Type *ScalarTy,
+                                Align CommonAlignment,
+                                SmallVectorImpl<unsigned> &SortedIndices,
+                                StridedPtrInfo &SPtrInfo) const;
+
   /// Checks if the given array of loads can be represented as a vectorized,
   /// scatter or just simple gather.
   /// \param VL list of loads.
@@ -6875,6 +6880,27 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
   return false;
 }
 
+bool BoUpSLP::analyzeRtStrideCandidate(ArrayRef<Value *> PointerOps,
+                                       Type *ScalarTy, Align CommonAlignment,
+                                       SmallVectorImpl<unsigned> &SortedIndices,
+                                       StridedPtrInfo &SPtrInfo) const {
+  const unsigned Sz = PointerOps.size();
+  // TODO: VecSz may change if we widen the strided load.
+  unsigned VecSz = Sz;
+  FixedVectorType *StridedLoadTy = getWidenedType(ScalarTy, VecSz);
+  if (!(Sz > MinProfitableStridedLoads && TTI->isTypeLegal(StridedLoadTy) &&
+        TTI->isLegalStridedLoadStore(StridedLoadTy, CommonAlignment)))
+    return false;
+  if (const SCEV *Stride =
+          calculateRtStride(PointerOps, ScalarTy, *DL, *SE, SortedIndices)) {
+    SPtrInfo.Ty = getWidenedType(ScalarTy, PointerOps.size());
+    SPtrInfo.StrideSCEV = Stride;
+    return true;
+  }
+  return false;
+}
+
 BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
     ArrayRef<Value *> VL, const Value *VL0, SmallVectorImpl<unsigned> &Order,
     SmallVectorImpl<Value *> &PointerOps, StridedPtrInfo &SPtrInfo,
@@ -6915,15 +6941,9 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
   auto *VecTy = getWidenedType(ScalarTy, Sz);
   Align CommonAlignment = computeCommonAlignment<LoadInst>(VL);
   if (!IsSorted) {
-    if (Sz > MinProfitableStridedLoads && TTI->isTypeLegal(VecTy)) {
-      if (const SCEV *Stride =
-              calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order);
-          Stride && TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) {
-        SPtrInfo.Ty = getWidenedType(ScalarTy, PointerOps.size());
-        SPtrInfo.StrideSCEV = Stride;
-        return LoadsState::StridedVectorize;
-      }
-    }
+    if (analyzeRtStrideCandidate(PointerOps, ScalarTy, CommonAlignment, Order,
+                                 SPtrInfo))
+      return LoadsState::StridedVectorize;
 
     if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
         TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment))
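
Why moving the checks matters: once strided loads can be widened, the element type and element count that reach checks like isTypeLegal / isLegalStridedLoadStore are no longer necessarily the original ScalarTy and PointerOps.size(). The standalone C++ sketch below is a minimal illustration of that idea, not LLVM code; LoadShape and widenStridedShape are hypothetical names, and it assumes that adjacent elements within each strided group are contiguous and can be folded into wider elements.

#include <iostream>

// (element bit-width, element count) describing a candidate strided load.
struct LoadShape {
  unsigned EltBits;
  unsigned NumElts;
};

// If several consecutive elements are contiguous within each strided "group",
// they can be folded into one wider element, e.g. 8 x i16 with two contiguous
// i16s per group becomes 4 x i32.
LoadShape widenStridedShape(LoadShape S, unsigned ContiguousPerGroup) {
  while (ContiguousPerGroup > 1 && S.NumElts % 2 == 0 && S.EltBits < 64) {
    S.EltBits *= 2;          // wider element type
    S.NumElts /= 2;          // fewer vector lanes
    ContiguousPerGroup /= 2;
  }
  return S;
}

int main() {
  LoadShape Orig{16, 8};                          // 8 x i16
  LoadShape Widened = widenStridedShape(Orig, 2); // -> 4 x i32
  std::cout << Orig.NumElts << " x i" << Orig.EltBits << "  ->  "
            << Widened.NumElts << " x i" << Widened.EltBits << "\n";
  // Any type-legality / strided-load-legality check must run on the widened
  // shape, not the original one.
  return 0;
}

Running this prints "8 x i16  ->  4 x i32": both the element width and the vector length change, which is why the size / type checks are now performed inside analyzeRtStrideCandidate, next to the TODO about VecSz, rather than at the call site on the unwidened VecTy.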
