Merged
51 changes: 41 additions & 10 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -956,6 +956,14 @@ class LoopVectorizationCostModel {
     return expectedCost(UserVF).isValid();
   }
 
+  /// \return True if maximizing vector bandwidth is enabled by the target or
+  /// user options, for the given register kind.
+  bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
+
+  /// \return True if maximizing vector bandwidth is enabled by the target or
+  /// user options, for the given vector factor.
+  bool useMaxBandwidth(ElementCount VF);
+
   /// \return The size (in bits) of the smallest and widest types in the code
   /// that needs to be vectorized. We ignore values that remain scalar such as
   /// 64 bit loop indices.
@@ -3918,6 +3926,20 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
   return FixedScalableVFPair::getNone();
 }
 
+bool LoopVectorizationCostModel::useMaxBandwidth(ElementCount VF) {
+  return useMaxBandwidth(VF.isScalable()
+                             ? TargetTransformInfo::RGK_ScalableVector
+                             : TargetTransformInfo::RGK_FixedWidthVector);
+}
+
+bool LoopVectorizationCostModel::useMaxBandwidth(
+    TargetTransformInfo::RegisterKind RegKind) {
+  return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
+                               (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
+                                (UseWiderVFIfCallVariantsPresent &&
+                                 Legal->hasVectorCallVariants())));
Comment on lines +3938 to +3940
@lukel97 (Contributor) commented on May 28, 2025:
@preames @wangpc-pp @topperc Making a note here that after this patch we'll likely regress cases like this on RVV unless we opt into shouldMaximizeVectorBandwidth https://godbolt.org/z/WcbWGooa4
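For context, a minimal sketch of the opt-in mentioned above, not part of this patch: a target enables bandwidth maximization by overriding the TTI hook that useMaxBandwidth() consults. The RISC-V class placement and the scalable-only policy below are assumptions for illustration.

// Hypothetical target-side opt-in; class placement and policy are assumed,
// not taken from this patch.
bool RISCVTTIImpl::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  // In this sketch, opt in only for scalable (RVV) vector registers.
  return K == TargetTransformInfo::RGK_ScalableVector;
}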

+}
+
 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
     unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
     ElementCount MaxSafeVF, bool FoldTailByMasking) {
@@ -3983,10 +4005,7 @@ ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
                            : TargetTransformInfo::RGK_FixedWidthVector;
   ElementCount MaxVF = MaxVectorElementCount;
-  if (MaximizeBandwidth ||
-      (MaximizeBandwidth.getNumOccurrences() == 0 &&
-       (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
-        (UseWiderVFIfCallVariantsPresent && Legal->hasVectorCallVariants())))) {
+  if (useMaxBandwidth(RegKind)) {
     auto MaxVectorElementCountMaxBW = ElementCount::get(
         llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
         ComputeScalableMaxVF);
@@ -4341,15 +4360,21 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
   for (auto &P : VPlans) {
     ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
                                P->vectorFactors().end());
-    auto RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
-    for (auto [VF, RU] : zip_equal(VFs, RUs)) {
+
+    SmallVector<VPRegisterUsage, 8> RUs;
+    if (CM.useMaxBandwidth(TargetTransformInfo::RGK_ScalableVector) ||
+        CM.useMaxBandwidth(TargetTransformInfo::RGK_FixedWidthVector))
+      RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
+
+    for (unsigned I = 0; I < VFs.size(); I++) {
+      ElementCount VF = VFs[I];
       // The cost for scalar VF=1 is already calculated, so ignore it.
       if (VF.isScalar())
         continue;
 
       /// Don't consider the VF if it exceeds the number of registers for the
       /// target.
-      if (RU.exceedsMaxNumRegs(TTI))
+      if (CM.useMaxBandwidth(VF) && RUs[I].exceedsMaxNumRegs(TTI))
         continue;
 
       InstructionCost C = CM.expectedCost(VF);
@@ -7096,8 +7121,14 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
   for (auto &P : VPlans) {
     ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
                                P->vectorFactors().end());
-    auto RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
-    for (auto [VF, RU] : zip_equal(VFs, RUs)) {
+
+    SmallVector<VPRegisterUsage, 8> RUs;
+    if (CM.useMaxBandwidth(TargetTransformInfo::RGK_ScalableVector) ||
+        CM.useMaxBandwidth(TargetTransformInfo::RGK_FixedWidthVector))
+      RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
+
+    for (unsigned I = 0; I < VFs.size(); I++) {
+      ElementCount VF = VFs[I];
       if (VF.isScalar())
         continue;
       if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
@@ -7119,7 +7150,7 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
       InstructionCost Cost = cost(*P, VF);
       VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
 
-      if (RU.exceedsMaxNumRegs(TTI)) {
+      if (CM.useMaxBandwidth(VF) && RUs[I].exceedsMaxNumRegs(TTI)) {
         LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
                           << VF << " because it uses too many registers\n");
         continue;
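The behavioural core of the patch, distilled into a standalone helper for clarity (a sketch built from the names in the diff above, not code that exists in tree): a VF is now pruned for register pressure only when that VF was unlocked by bandwidth maximization, and the short-circuit keeps the RUs[I] access in bounds, since RUs is only populated when some register kind maximizes bandwidth.

// Hypothetical distillation of the pruning rule now used in both
// VF-selection loops above.
static bool pruneForRegisterPressure(const LoopVectorizationCostModel &CM,
                                     ElementCount VF, unsigned I,
                                     ArrayRef<VPRegisterUsage> RUs,
                                     const TargetTransformInfo &TTI) {
  // If useMaxBandwidth(VF) is true, RUs was computed for this plan, so
  // RUs[I] is in bounds; otherwise the && short-circuits and the possibly
  // empty RUs is never indexed.
  return CM.useMaxBandwidth(VF) && RUs[I].exceedsMaxNumRegs(TTI);
}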
136 changes: 136 additions & 0 deletions llvm/test/Transforms/LoopVectorize/ARM/mve-reg-pressure-vmla.ll
@@ -0,0 +1,136 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^scalar.ph:" --version 5
; RUN: opt -mattr=+mve -passes=loop-vectorize < %s -S -o - | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-unknown-none-eabihf"

; Even though it has high register pressure, this example should still vectorise since the mul+add chains become VMLAs.
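; With MVE's eight 128-bit Q registers, the three interleaved multiply-accumulate
; chains keep many <4 x i32> values live at once, so the estimated register
; usage exceeds the target's limit; with the change above, that estimate only
; vetoes VFs reached by maximizing bandwidth, so vectorisation goes ahead here.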

define void @fn(i32 noundef %n, ptr %in, ptr %out) #0 {
; CHECK-LABEL: define void @fn(
; CHECK-SAME: i32 noundef [[N:%.*]], ptr [[IN:%.*]], ptr [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CMP46_NOT:%.*]] = icmp eq i32 [[N]], 0
; CHECK-NEXT: br i1 [[CMP46_NOT]], [[EXIT:label %.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[N]], 3
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[OUT]], i32 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[IN]], i32 [[TMP0]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[OUT]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[IN]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[IN]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI2:%.*]] = phi ptr [ [[OUT]], %[[VECTOR_PH]] ], [ [[PTR_IND3:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[VECTOR_GEP4:%.*]] = getelementptr i8, ptr [[POINTER_PHI2]], <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP]], i32 1
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> [[VECTOR_GEP]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison), !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP]], i32 2
; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> [[TMP1]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison), !alias.scope [[META0]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> [[TMP2]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison), !alias.scope [[META0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_MASKED_GATHER]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 19595)
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[WIDE_MASKED_GATHER5]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw nsw <4 x i32> [[TMP5]], splat (i32 38470)
; CHECK-NEXT: [[TMP7:%.*]] = zext <4 x i8> [[WIDE_MASKED_GATHER6]] to <4 x i32>
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw nsw <4 x i32> [[TMP7]], splat (i32 7471)
; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw <4 x i32> [[TMP4]], splat (i32 32768)
; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw <4 x i32> [[TMP9]], [[TMP6]]
; CHECK-NEXT: [[TMP11:%.*]] = add nuw nsw <4 x i32> [[TMP10]], [[TMP8]]
; CHECK-NEXT: [[TMP12:%.*]] = lshr <4 x i32> [[TMP11]], splat (i32 16)
; CHECK-NEXT: [[TMP13:%.*]] = trunc <4 x i32> [[TMP12]] to <4 x i8>
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 32767)
; CHECK-NEXT: [[TMP15:%.*]] = mul nuw <4 x i32> [[TMP5]], splat (i32 16762097)
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw <4 x i32> [[TMP7]], splat (i32 16759568)
; CHECK-NEXT: [[TMP17:%.*]] = add nuw nsw <4 x i32> [[TMP14]], splat (i32 32768)
; CHECK-NEXT: [[TMP18:%.*]] = add nuw <4 x i32> [[TMP17]], [[TMP15]]
; CHECK-NEXT: [[TMP19:%.*]] = add <4 x i32> [[TMP18]], [[TMP16]]
; CHECK-NEXT: [[TMP20:%.*]] = lshr <4 x i32> [[TMP19]], splat (i32 16)
; CHECK-NEXT: [[TMP21:%.*]] = trunc <4 x i32> [[TMP20]] to <4 x i8>
; CHECK-NEXT: [[TMP22:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 13282)
; CHECK-NEXT: [[TMP23:%.*]] = mul nuw <4 x i32> [[TMP5]], splat (i32 16744449)
; CHECK-NEXT: [[TMP24:%.*]] = mul nuw nsw <4 x i32> [[TMP7]], splat (i32 19485)
; CHECK-NEXT: [[TMP25:%.*]] = add nuw nsw <4 x i32> [[TMP22]], splat (i32 32768)
; CHECK-NEXT: [[TMP26:%.*]] = add nuw <4 x i32> [[TMP25]], [[TMP23]]
; CHECK-NEXT: [[TMP27:%.*]] = add nuw <4 x i32> [[TMP26]], [[TMP24]]
; CHECK-NEXT: [[TMP28:%.*]] = lshr <4 x i32> [[TMP27]], splat (i32 16)
; CHECK-NEXT: [[TMP29:%.*]] = trunc <4 x i32> [[TMP28]] to <4 x i8>
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP4]], i32 1
; CHECK-NEXT: call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> [[TMP13]], <4 x ptr> [[VECTOR_GEP4]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP4]], i32 2
; CHECK-NEXT: call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> [[TMP21]], <4 x ptr> [[TMP30]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> [[TMP29]], <4 x ptr> [[TMP31]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 12
; CHECK-NEXT: [[PTR_IND3]] = getelementptr i8, ptr [[POINTER_PHI2]], i32 12
; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[EXIT_LOOPEXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
%cmp46.not = icmp eq i32 %n, 0
br i1 %cmp46.not, label %exit, label %for.body

for.body: ; preds = %for.body.preheader, %for.body
%ptr.iv.1 = phi ptr [ %in, %entry ], [ %ptr.iv.1.next, %for.body ]
%ptr.iv.2 = phi ptr [ %out, %entry ], [ %ptr.iv.2.next, %for.body ]
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%incdec.ptr = getelementptr inbounds nuw i8, ptr %ptr.iv.1, i32 1
%0 = load i8, ptr %ptr.iv.1, align 1
%incdec.ptr1 = getelementptr inbounds nuw i8, ptr %ptr.iv.1, i32 2
%1 = load i8, ptr %incdec.ptr, align 1
%ptr.iv.1.next = getelementptr inbounds nuw i8, ptr %ptr.iv.1, i32 3
%2 = load i8, ptr %incdec.ptr1, align 1
%conv = zext i8 %0 to i32
%mul = mul nuw nsw i32 %conv, 19595
%conv3 = zext i8 %1 to i32
%mul4 = mul nuw nsw i32 %conv3, 38470
%conv5 = zext i8 %2 to i32
%mul6 = mul nuw nsw i32 %conv5, 7471
%add = add nuw nsw i32 %mul, 32768
%add7 = add nuw nsw i32 %add, %mul4
%add8 = add nuw nsw i32 %add7, %mul6
%shr = lshr i32 %add8, 16
%conv9 = trunc nuw i32 %shr to i8
%mul11 = mul nuw nsw i32 %conv, 32767
%mul13 = mul nuw i32 %conv3, 16762097
%mul16 = mul nuw i32 %conv5, 16759568
%add14 = add nuw nsw i32 %mul11, 32768
%add17 = add nuw i32 %add14, %mul13
%add18 = add i32 %add17, %mul16
%shr19 = lshr i32 %add18, 16
%conv20 = trunc i32 %shr19 to i8
%mul22 = mul nuw nsw i32 %conv, 13282
%mul24 = mul nuw i32 %conv3, 16744449
%mul27 = mul nuw nsw i32 %conv5, 19485
%add25 = add nuw nsw i32 %mul22, 32768
%add28 = add nuw i32 %add25, %mul24
%add29 = add nuw i32 %add28, %mul27
%shr30 = lshr i32 %add29, 16
%conv31 = trunc i32 %shr30 to i8
%incdec.ptr32 = getelementptr inbounds nuw i8, ptr %ptr.iv.2, i32 1
store i8 %conv9, ptr %ptr.iv.2, align 1
%incdec.ptr33 = getelementptr inbounds nuw i8, ptr %ptr.iv.2, i32 2
store i8 %conv20, ptr %incdec.ptr32, align 1
%ptr.iv.2.next = getelementptr inbounds nuw i8, ptr %ptr.iv.2, i32 3
store i8 %conv31, ptr %incdec.ptr33, align 1
%iv.next = add nuw i32 %iv, 1
%exitcond.not = icmp eq i32 %iv.next, %n
br i1 %exitcond.not, label %exit, label %for.body

exit: ; preds = %for.cond.cleanup.loopexit, %entry
ret void
}