Skip to content

Commit 7dd9b3d

Browse files
committed
[LV] Also handle non-uniform scalarized loads when processing AddrDefs.
Loads of addresses are scalarized and have their costs computed w/o scalarization overhead. Consistently apply this logic also to non-uniform loads that are already scalarized, to ensure their costs are consistent with other scalarized loads that are used as addresses.
1 parent 5466211 commit 7dd9b3d

File tree

2 files changed

+38
-26
lines changed

2 files changed

+38
-26
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5717,8 +5717,11 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
57175717
// if the loaded register is involved in an address computation, it is
57185718
// instead changed here when we know this is the case.
57195719
InstWidening Decision = getWideningDecision(I, VF);
5720-
if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
5721-
// Scalarize a widened load of address.
5720+
if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5721+
(!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
5722+
Decision == CM_Scalarize))
5723+
// Scalarize a widened load of address or update the cost of a scalar
5724+
// load of an address.
57225725
setWideningDecision(
57235726
I, VF, CM_Scalarize,
57245727
(VF.getKnownMinValue() *

llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll

Lines changed: 33 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -701,36 +701,45 @@ exit:
701701
define double @test_scalarization_cost_for_load_of_address(ptr %src.0, ptr %src.1, ptr %src.2) {
702702
; CHECK-LABEL: define double @test_scalarization_cost_for_load_of_address(
703703
; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], ptr [[SRC_2:%.*]]) {
704-
; CHECK-NEXT: [[ENTRY:.*]]:
705-
; CHECK-NEXT: br label %[[LOOP:.*]]
706-
; CHECK: [[LOOP]]:
707-
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
708-
; CHECK-NEXT: [[RED:%.*]] = phi double [ 3.000000e+00, %[[ENTRY]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
704+
; CHECK-NEXT: [[ENTRY:.*:]]
705+
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
706+
; CHECK: [[VECTOR_PH]]:
707+
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
708+
; CHECK: [[VECTOR_BODY]]:
709+
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
710+
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi double [ 3.000000e+00, %[[VECTOR_PH]] ], [ [[TMP21:%.*]], %[[VECTOR_BODY]] ]
711+
; CHECK-NEXT: [[IV:%.*]] = add i64 [[INDEX]], 0
712+
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
709713
; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr [[T:%.*]], ptr [[SRC_0]], i64 [[IV]]
710-
; CHECK-NEXT: [[L_0:%.*]] = load double, ptr [[GEP_0]], align 8
711-
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr i8, ptr [[GEP_0]], i64 8
712-
; CHECK-NEXT: [[L_1:%.*]] = load double, ptr [[GEP_8]], align 8
713-
; CHECK-NEXT: [[GEP_16:%.*]] = getelementptr i8, ptr [[GEP_0]], i64 16
714-
; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[GEP_16]], align 8
715-
; CHECK-NEXT: [[MUL_0:%.*]] = fmul double [[L_0]], 3.000000e+00
716-
; CHECK-NEXT: [[MUL_1:%.*]] = fmul double [[L_1]], 3.000000e+00
717-
; CHECK-NEXT: [[MUL_2:%.*]] = fmul double [[L_2]], 3.000000e+00
718-
; CHECK-NEXT: [[ADD_0:%.*]] = fadd double [[MUL_0]], [[MUL_1]]
719-
; CHECK-NEXT: [[ADD_1:%.*]] = fadd double [[ADD_0]], [[MUL_2]]
714+
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <6 x double>, ptr [[GEP_0]], align 8
715+
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <6 x double> [[WIDE_VEC]], <6 x double> poison, <2 x i32> <i32 0, i32 3>
716+
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <6 x double> [[WIDE_VEC]], <6 x double> poison, <2 x i32> <i32 1, i32 4>
717+
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <6 x double> [[WIDE_VEC]], <6 x double> poison, <2 x i32> <i32 2, i32 5>
718+
; CHECK-NEXT: [[TMP3:%.*]] = fmul <2 x double> [[STRIDED_VEC]], splat (double 3.000000e+00)
719+
; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> [[STRIDED_VEC1]], splat (double 3.000000e+00)
720+
; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[STRIDED_VEC2]], splat (double 3.000000e+00)
721+
; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[TMP3]], [[TMP4]]
722+
; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], [[TMP5]]
720723
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[IV]]
721-
; CHECK-NEXT: [[L:%.*]] = load double, ptr [[GEP_SRC]], align 8
722-
; CHECK-NEXT: [[MUL256_US:%.*]] = fmul double [[ADD_1]], [[L]]
724+
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[GEP_SRC]], align 8
725+
; CHECK-NEXT: [[TMP9:%.*]] = fmul <2 x double> [[TMP7]], [[WIDE_LOAD]]
723726
; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr [[T_2:%.*]], ptr [[SRC_2]], i64 [[IV]]
727+
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [[T_2]], ptr [[SRC_2]], i64 [[TMP1]]
724728
; CHECK-NEXT: [[GEP_72:%.*]] = getelementptr i8, ptr [[GEP_SRC_2]], i64 72
729+
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP11]], i64 72
725730
; CHECK-NEXT: [[L_P_2:%.*]] = load ptr, ptr [[GEP_72]], align 8
731+
; CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP13]], align 8
726732
; CHECK-NEXT: [[LV:%.*]] = load double, ptr [[L_P_2]], align 8
727-
; CHECK-NEXT: [[RED_NEXT]] = tail call double @llvm.fmuladd.f64(double [[MUL256_US]], double [[LV]], double [[RED]])
728-
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
729-
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
730-
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
731-
; CHECK: [[EXIT]]:
732-
; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi double [ [[RED_NEXT]], %[[LOOP]] ]
733-
; CHECK-NEXT: ret double [[RED_NEXT_LCSSA]]
733+
; CHECK-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP15]], align 8
734+
; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x double> poison, double [[LV]], i32 0
735+
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x double> [[TMP18]], double [[TMP17]], i32 1
736+
; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[TMP9]], [[TMP19]]
737+
; CHECK-NEXT: [[TMP21]] = call double @llvm.vector.reduce.fadd.v2f64(double [[VEC_PHI]], <2 x double> [[TMP20]])
738+
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
739+
; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
740+
; CHECK: [[MIDDLE_BLOCK]]:
741+
; CHECK-NEXT: br [[EXIT:label %.*]]
742+
; CHECK: [[SCALAR_PH:.*:]]
734743
;
735744
entry:
736745
br label %loop

0 commit comments

Comments
 (0)