 define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret

-  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }

 define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2i64_negative_offset:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret

-  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }

@@ -46,54 +46,54 @@ define <2 x i64> @test_vp_splice_v2i64_zero_offset(<2 x i64> %va, <2 x i64> %vb,
 define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2i64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1, v0.t
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }

 define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret

-  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }

 define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4i32_negative_offset:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT:    addi a0, a0, -3
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret

-  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }

 define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4i32_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }

@@ -182,82 +182,82 @@ define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16
 define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret

-  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }

 define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2f64_negative_offset:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret

-  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }

 define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2f64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1, v0.t
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }

 define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret

-  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }

 define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4f32_negative_offset:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT:    addi a0, a0, -3
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret

-  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }

 define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4f32_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
