@@ -2245,6 +2245,26 @@ class BoUpSLP {
22452245 Align Alignment, const int64_t Diff, Value *Ptr0,
22462246 Value *PtrN, StridedPtrInfo &SPtrInfo) const;
22472247
2248+ /// Return true if an array of scalar loads can be replaced with a strided
2249+ /// load (with run-time stride).
2250+ /// \param PointerOps list of pointer arguments of loads.
2251+ /// \param ScalarTy type of loads.
2252+  /// \param CommonAlignment common alignment of loads as computed by
2253+ /// `computeCommonAlignment<LoadInst>`.
2254+  /// \param SortedIndices is a list of indices computed by this function such
2255+  /// that the sequence `PointerOps[SortedIndices[0]],
2256+  /// PointerOps[SortedIndices[1]], ..., PointerOps[SortedIndices[n]]` is
2257+ /// ordered by the coefficient of the stride. For example, if PointerOps is
2258+ /// `%base + %stride, %base, %base + 2 * stride` the `SortedIndices` will be
2259+  /// `[1, 0, 2]`. We follow the convention that if `SortedIndices` would have
2260+  /// to be `0, 1, 2, 3, ...`, an empty vector is returned for `SortedIndices`.
2261+  /// \param SPtrInfo If the function returns `true`, it also sets all the
2262+  /// fields of `SPtrInfo` necessary to generate the strided load later.
2262+ /// of `SPtrInfo` necessary to generate the strided load later.
2263+ bool analyzeRtStrideCandidate(ArrayRef<Value *> PointerOps, Type *ScalarTy,
2264+ Align CommonAlignment,
2265+ SmallVectorImpl<unsigned> &SortedIndices,
2266+ StridedPtrInfo &SPtrInfo) const;
2267+
22482268 /// Checks if the given array of loads can be represented as a vectorized,
22492269 /// scatter or just simple gather.
22502270 /// \param VL list of loads.
@@ -6875,6 +6895,24 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
68756895 return false;
68766896}
68776897
6898+ bool BoUpSLP::analyzeRtStrideCandidate(ArrayRef<Value *> PointerOps,
6899+ Type *ScalarTy, Align CommonAlignment,
6900+ SmallVectorImpl<unsigned> &SortedIndices,
6901+ StridedPtrInfo &SPtrInfo) const {
6902+ const unsigned Sz = PointerOps.size();
6903+ FixedVectorType *StridedLoadTy = getWidenedType(ScalarTy, Sz);
6904+ if (Sz <= MinProfitableStridedLoads || !TTI->isTypeLegal(StridedLoadTy) ||
6905+ !TTI->isLegalStridedLoadStore(StridedLoadTy, CommonAlignment))
6906+ return false;
6907+ if (const SCEV *Stride =
6908+ calculateRtStride(PointerOps, ScalarTy, *DL, *SE, SortedIndices)) {
6909+ SPtrInfo.Ty = getWidenedType(ScalarTy, PointerOps.size());
6910+ SPtrInfo.StrideSCEV = Stride;
6911+ return true;
6912+ }
6913+ return false;
6914+ }
6915+
68786916BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
68796917 ArrayRef<Value *> VL, const Value *VL0, SmallVectorImpl<unsigned> &Order,
68806918 SmallVectorImpl<Value *> &PointerOps, StridedPtrInfo &SPtrInfo,
@@ -6915,15 +6953,9 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
69156953 auto *VecTy = getWidenedType(ScalarTy, Sz);
69166954 Align CommonAlignment = computeCommonAlignment<LoadInst>(VL);
69176955 if (!IsSorted) {
6918- if (Sz > MinProfitableStridedLoads && TTI->isTypeLegal(VecTy)) {
6919- if (const SCEV *Stride =
6920- calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order);
6921- Stride && TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) {
6922- SPtrInfo.Ty = getWidenedType(ScalarTy, PointerOps.size());
6923- SPtrInfo.StrideSCEV = Stride;
6924- return LoadsState::StridedVectorize;
6925- }
6926- }
6956+ if (analyzeRtStrideCandidate(PointerOps, ScalarTy, CommonAlignment, Order,
6957+ SPtrInfo))
6958+ return LoadsState::StridedVectorize;
69276959
69286960 if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
69296961 TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment))
0 commit comments