@@ -80,10 +80,9 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 3
 ; CHECK-NEXT: slli a1, a0, 1
-; CHECK-NEXT: add a1, a1, a0
 ; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v8, v9, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
 ; CHECK-NEXT: ret
   %v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
   ret <vscale x 4 x i8> %v
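The hunk above (and the matching e32/e16/bf16 hunks further down) drops the trailing add that materialized an exact AVL: the inserted <vscale x 1 x i8> part ends exactly at VLMAX for e8,mf2, and the slideup runs under the tail-agnostic (ta) policy, so "vsetvli a1, zero" requests VLMAX and produces the same result one instruction sooner. A minimal C sketch of the arithmetic, assuming vlenb stands for the RISC-V vlenb CSR (VLEN/8); the function names are illustrative only, not from the test:

    #include <stdint.h>

    /* One <vscale x 1 x i8> part holds vlenb/8 elements. */
    static uint64_t avl_old(uint64_t vlenb) {
        uint64_t part = vlenb >> 3;            /* srli */
        uint64_t offset = (part << 1) + part;  /* slli + add: 3*part */
        return offset + part;                  /* dropped add: 4*part */
    }

    /* VLMAX for e8,mf2 is vlenb/2, which equals 4*part exactly. */
    static uint64_t avl_new(uint64_t vlenb) {
        return vlenb >> 1;
    }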
@@ -246,8 +245,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec,
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v8, v16, a0
 ; CHECK-NEXT: ret
   %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
@@ -282,8 +280,8 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_1(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-LABEL: insert_nxv16i8_nxv1i8_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a1, a0, 2
 ; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: add a1, a0, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT: vslideup.vx v8, v10, a0
 ; CHECK-NEXT: ret
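Here the insert at index 1 ends mid-register, so the exact AVL and the tail-undisturbed (tu) policy must stay; only the derivation of AVL = vlenb/4 changes. Doubling the offset (add a1, a0, a0) depended on the preceding srli, while the new "srli a1, a0, 2" reads vlenb directly, so the two shifts are independent. A sketch under the same illustrative naming as above:

    #include <stdint.h>

    static uint64_t avl_old(uint64_t vlenb) {
        uint64_t offset = vlenb >> 3;  /* srli a0, a0, 3 */
        return offset + offset;        /* add a1, a0, a0 */
    }

    static uint64_t avl_new(uint64_t vlenb) {
        return vlenb >> 2;             /* srli a1, a0, 2: same value */
    }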
@@ -296,8 +294,9 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_2(<vscale x 16 x i8> %vec, <vsc
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a2, a1, 1
+; CHECK-NEXT: add a1, a2, a1
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a1
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT: vslideup.vx v8, v10, a0
 ; CHECK-NEXT: ret
@@ -309,10 +308,10 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_3(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-LABEL: insert_nxv16i8_nxv1i8_3:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: slli a1, a0, 1
-; CHECK-NEXT: add a1, a1, a0
-; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a2, a1, 1
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: srli a0, a0, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT: vslideup.vx v8, v10, a1
 ; CHECK-NEXT: ret
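The index-3 case above keeps tu and an exact AVL for the same mid-register reason, but the AVL computation is re-rooted at vlenb: the old code chained srli, slli, add, add so the AVL waited on the offset, while the new code still builds the offset as 3*(vlenb/8) yet derives the AVL as vlenb/2 with one independent shift. A hedged sketch (names illustrative):

    #include <stdint.h>

    static uint64_t avl_old(uint64_t vlenb) {
        uint64_t part = vlenb >> 3;
        uint64_t offset = (part << 1) + part;  /* 3*part */
        return offset + part;                  /* dependent add: 4*part */
    }

    static uint64_t avl_new(uint64_t vlenb) {
        return vlenb >> 1;                     /* 4*(vlenb/8) == vlenb/2 */
    }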
@@ -363,8 +362,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v8, v16, a0
 ; CHECK-NEXT: ret
   %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
@@ -376,8 +374,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v14, v16, a0
 ; CHECK-NEXT: ret
   %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
@@ -397,8 +394,9 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a2, a1, 1
+; CHECK-NEXT: add a1, a2, a1
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a1
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v14, v8, a0
 ; CHECK-NEXT: ret
@@ -422,8 +420,8 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_8(<vscale x 32 x i1> %v, <vscal
 ; CHECK-LABEL: insert_nxv32i1_nxv8i1_8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a1, a0, 2
 ; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: add a1, a0, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
 ; CHECK-NEXT: vslideup.vx v0, v8, a0
 ; CHECK-NEXT: ret
@@ -462,10 +460,11 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: srli a1, a0, 3
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a1
 ; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: slli a2, a1, 1
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: srli a0, a0, 2
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
 ; CHECK-NEXT: vslideup.vx v9, v8, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
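In the mask hunk above, old and new code agree on offset = vlenb/4 and AVL = 3*(vlenb/8); the new form rebuilds the AVL from the vlenb/8 term alone (slli + add) instead of summing the two shifted copies of vlenb, and the scalar arithmetic now sits between the vector mask ops, presumably a scheduling side effect of the rewrite. A sketch of the equivalence (illustrative names, assuming vlenb is a multiple of 8):

    #include <stdint.h>

    static uint64_t avl_old(uint64_t vlenb) {
        return (vlenb >> 2) + (vlenb >> 3);    /* add a1, a0, a1 */
    }

    static uint64_t avl_new(uint64_t vlenb) {
        uint64_t eighth = vlenb >> 3;
        return (eighth << 1) + eighth;         /* slli + add */
    }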
@@ -570,8 +569,7 @@ define <vscale x 32 x bfloat> @insert_nxv32bf16_nxv2bf16_2(<vscale x 32 x bfloat
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v8, v16, a0
 ; CHECK-NEXT: ret
   %v = call <vscale x 32 x bfloat> @llvm.vector.insert.nxv2bf16.nxv32bf16(<vscale x 32 x bfloat> %vec, <vscale x 2 x bfloat> %subvec, i64 2)
@@ -583,8 +581,7 @@ define <vscale x 32 x bfloat> @insert_nxv32bf16_nxv2bf16_26(<vscale x 32 x bfloa
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v14, v16, a0
 ; CHECK-NEXT: ret
   %v = call <vscale x 32 x bfloat> @llvm.vector.insert.nxv2bf16.nxv32bf16(<vscale x 32 x bfloat> %vec, <vscale x 2 x bfloat> %subvec, i64 26)
@@ -604,8 +601,9 @@ define <vscale x 32 x bfloat> @insert_nxv32bf16_undef_nxv1bf16_26(<vscale x 1 x
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a2, a1, 1
+; CHECK-NEXT: add a1, a2, a1
 ; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: add a1, a0, a1
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vslideup.vx v14, v8, a0
 ; CHECK-NEXT: ret