
Commit 64dfcd2

[VectorUtils] Check wrap with assume in analyzeInterleaving
Due to several improvements to LAA, the pending TODO item to check for wrapping with Assume=true in getPtrStride can now be addressed.
1 parent 912cc5f commit 64dfcd2
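
For context, the functional change is the Assume flag passed to getPtrStride when analyzeInterleaving re-checks interleave-group members for pointer wrapping. Below is a simplified sketch of the call site, condensed from the VectorUtils.cpp diff that follows (surrounding code elided):

    // Before: never add runtime SCEV predicates, so a potentially wrapping
    // pointer simply invalidates the candidate interleave group.
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
      return false;

    // After: allow getPtrStride to add SCEV predicates (runtime assumption
    // checks) when needed, which is why the updated tests below gain
    // vector.scevcheck blocks.
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/true, /*ShouldCheckWrap=*/true))
      return false;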

4 files changed (+63 −47 lines)


llvm/lib/Analysis/VectorUtils.cpp

Lines changed: 1 addition & 5 deletions
@@ -1644,7 +1644,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
     Value *MemberPtr = getLoadStorePointerOperand(Member);
     Type *AccessTy = getLoadStoreType(Member);
     if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
-                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
+                     /*Assume=*/true, /*ShouldCheckWrap=*/true))
       return false;
     LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                       << FirstOrLast
@@ -1657,10 +1657,6 @@ void InterleavedAccessInfo::analyzeInterleaving(
   // accesses may wrap around. We have to revisit the getPtrStride analysis,
   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
   // not check wrapping (see documentation there).
-  // FORNOW we use Assume=false;
-  // TODO: Change to Assume=true but making sure we don't exceed the threshold
-  // of runtime SCEV assumptions checks (thereby potentially failing to
-  // vectorize altogether).
   // Additional optional optimizations:
   // TODO: If we are peeling the loop and we know that the first pointer doesn't
   // wrap then we can deduce that all pointers in the group don't wrap.

llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll

Lines changed: 31 additions & 14 deletions
@@ -154,6 +154,24 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 2
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
 ; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
+; CHECK: vector.scevcheck:
+; CHECK-NEXT: [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 4)
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[UMAX1]], -1
+; CHECK-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 2
+; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP16]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult ptr [[TMP5]], [[SRC]]
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP16]])
+; CHECK-NEXT: [[MUL_RESULT2:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW3:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[MUL_RESULT2]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ult ptr [[TMP17]], [[DST]]
+; CHECK-NEXT: [[TMP24:%.*]] = or i1 [[TMP23]], [[MUL_OVERFLOW3]]
+; CHECK-NEXT: [[TMP25:%.*]] = or i1 [[TMP7]], [[TMP24]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH1:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
@@ -171,12 +189,12 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[SRC:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH1]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH1]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH1]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
 ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x ptr> align 4 [[TMP20]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
 ; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP12]])
@@ -186,8 +204,7 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
 ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: while.end.loopexit:
-; CHECK-NEXT: ret void
+; CHECK: scalar.ph:
 ;
 entry:
   br label %while.body
@@ -235,7 +252,7 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
 ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
 ; CHECK-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
 ; CHECK: while.end.loopexit:
@@ -289,7 +306,7 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
 ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
 ; CHECK-NEXT: [[TMP13:%.*]] = xor i1 [[TMP14]], true
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.end:
@@ -348,7 +365,7 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
 ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.end:
@@ -410,7 +427,7 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n)
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
 ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
 ; CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP13]], true
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.end:
@@ -462,7 +479,7 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
 ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
 ; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
 ; CHECK: while.end.loopexit:
@@ -518,7 +535,7 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
 ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
 ; CHECK-NEXT: [[TMP17:%.*]] = xor i1 [[TMP14]], true
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
 ; CHECK: while.end.loopexit:
@@ -561,7 +578,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
 ; CHECK-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]]
 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH:%.*]]

llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll

Lines changed: 28 additions & 24 deletions
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
 ; RUN: opt -p loop-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S %s | FileCheck %s
 
 target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
@@ -351,7 +351,18 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
 ; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[SMAX]], 1
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[SMAX1:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw i64 [[SMAX1]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = lshr i64 [[TMP3]], 1
+; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP4]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult ptr [[TMP5]], [[DST]]
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i64> [[TMP9]], splat (i64 2)
@@ -383,6 +394,21 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
 ; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1
+; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_1]]
+; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1
+; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[L_1]] to i32
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2
+; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP19:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -408,25 +434,3 @@ exit:
 }
 
 attributes #0 = { "target-features"="+64bit,+v" }
-
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[META3]] = !{[[META4:![0-9]+]]}
-; CHECK: [[META4]] = distinct !{[[META4]], [[META5:![0-9]+]]}
-; CHECK: [[META5]] = distinct !{[[META5]], !"LVerDomain"}
-; CHECK: [[META6]] = !{[[META7:![0-9]+]]}
-; CHECK: [[META7]] = distinct !{[[META7]], [[META5]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
-; CHECK: [[META10]] = !{[[META11:![0-9]+]]}
-; CHECK: [[META11]] = distinct !{[[META11]], [[META12:![0-9]+]]}
-; CHECK: [[META12]] = distinct !{[[META12]], !"LVerDomain"}
-; CHECK: [[META13]] = !{[[META14:![0-9]+]]}
-; CHECK: [[META14]] = distinct !{[[META14]], [[META12]]}
-; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]}
-; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
-;.

llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll

Lines changed: 3 additions & 4 deletions
@@ -351,16 +351,16 @@ define void @geps_feeding_interleave_groups_with_reuse2(ptr %A, ptr %B, i64 %N)
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 8, i64 16, i64 24>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
 ; CHECK-NEXT: [[TMP51:%.*]] = lshr exact i64 [[OFFSET_IDX]], 1
 ; CHECK-NEXT: [[TMP52:%.*]] = getelementptr nusw i32, ptr [[B]], i64 [[TMP51]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP52]], align 4, !alias.scope [[META10:![0-9]+]], !noalias [[META13:![0-9]+]]
 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
 ; CHECK-NEXT: [[STRIDED_VEC34:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
 ; CHECK-NEXT: [[TMP56:%.*]] = getelementptr i32, ptr [[A]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP54:%.*]] = getelementptr i32, ptr [[B]], <4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> align 4 [[TMP54]], <4 x i1> splat (i1 true), <4 x i32> poison), !alias.scope [[META15:![0-9]+]], !noalias [[META13]]
+; CHECK-NEXT: [[TMP54:%.*]] = getelementptr i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_VEC42:%.*]] = load <32 x i32>, ptr [[TMP54]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META13]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = shufflevector <32 x i32> [[WIDE_VEC42]], <32 x i32> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
 ; CHECK-NEXT: [[TMP58:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC]], <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 ; CHECK-NEXT: [[TMP59:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC34]], <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 ; CHECK-NEXT: [[TMP60:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_GATHER]], <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -370,7 +370,6 @@ define void @geps_feeding_interleave_groups_with_reuse2(ptr %A, ptr %B, i64 %N)
 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <32 x i32> [[TMP63]], <32 x i32> poison, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
 ; CHECK-NEXT: store <32 x i32> [[INTERLEAVED_VEC]], ptr [[TMP56]], align 4, !alias.scope [[META13]]
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 32)
 ; CHECK-NEXT: [[TMP64:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP64]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
