@@ -1445,10 +1445,9 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale
14451445; RV32-NEXT: addi a6, sp, 8
14461446; RV32-NEXT: sw a4, 8(sp)
14471447; RV32-NEXT: sw zero, 12(sp)
1448- ; RV32-NEXT: vsetvli a4, zero, e64, m1, ta, ma
1448+ ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
14491449; RV32-NEXT: vlse64.v v9, (a6), zero
14501450; RV32-NEXT: lui a4, 61681
1451- ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
14521451; RV32-NEXT: vsll.vx v10, v8, a3, v0.t
14531452; RV32-NEXT: addi a5, a5, -256
14541453; RV32-NEXT: vand.vx v11, v8, a5, v0.t
@@ -1595,9 +1594,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
15951594; RV32-NEXT: vand.vx v13, v8, a1
15961595; RV32-NEXT: vand.vx v12, v12, a1
15971596; RV32-NEXT: vor.vv v11, v12, v11
1598- ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
15991597; RV32-NEXT: vlse64.v v12, (a6), zero
1600- ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
16011598; RV32-NEXT: vsll.vx v13, v13, a4
16021599; RV32-NEXT: vor.vv v10, v10, v13
16031600; RV32-NEXT: vsrl.vi v13, v8, 8
@@ -1730,10 +1727,9 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale
17301727; RV32-NEXT: addi a6, sp, 8
17311728; RV32-NEXT: sw a4, 8(sp)
17321729; RV32-NEXT: sw zero, 12(sp)
1733- ; RV32-NEXT: vsetvli a4, zero, e64, m2, ta, ma
1730+ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
17341731; RV32-NEXT: vlse64.v v10, (a6), zero
17351732; RV32-NEXT: lui a4, 61681
1736- ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
17371733; RV32-NEXT: vsll.vx v12, v8, a3, v0.t
17381734; RV32-NEXT: addi a5, a5, -256
17391735; RV32-NEXT: vand.vx v14, v8, a5, v0.t
@@ -1880,9 +1876,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
18801876; RV32-NEXT: vand.vx v18, v8, a1
18811877; RV32-NEXT: vand.vx v16, v16, a1
18821878; RV32-NEXT: vor.vv v10, v16, v10
1883- ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
18841879; RV32-NEXT: vlse64.v v16, (a6), zero
1885- ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
18861880; RV32-NEXT: vsll.vx v18, v18, a4
18871881; RV32-NEXT: vor.vv v12, v12, v18
18881882; RV32-NEXT: vsrl.vi v18, v8, 8
@@ -2015,10 +2009,9 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale
20152009; RV32-NEXT: addi a6, sp, 8
20162010; RV32-NEXT: sw a4, 8(sp)
20172011; RV32-NEXT: sw zero, 12(sp)
2018- ; RV32-NEXT: vsetvli a4, zero, e64, m4, ta, ma
2012+ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
20192013; RV32-NEXT: vlse64.v v12, (a6), zero
20202014; RV32-NEXT: lui a4, 61681
2021- ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
20222015; RV32-NEXT: vsll.vx v16, v8, a3, v0.t
20232016; RV32-NEXT: addi a5, a5, -256
20242017; RV32-NEXT: vand.vx v20, v8, a5, v0.t
@@ -2165,9 +2158,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
21652158; RV32-NEXT: vand.vx v28, v8, a1
21662159; RV32-NEXT: vand.vx v24, v24, a1
21672160; RV32-NEXT: vor.vv v12, v24, v12
2168- ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
21692161; RV32-NEXT: vlse64.v v24, (a6), zero
2170- ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
21712162; RV32-NEXT: vsll.vx v28, v28, a4
21722163; RV32-NEXT: vor.vv v16, v16, v28
21732164; RV32-NEXT: vsrl.vi v28, v8, 8
@@ -2315,15 +2306,13 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
23152306; RV32-NEXT: add a3, sp, a3
23162307; RV32-NEXT: addi a3, a3, 16
23172308; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
2318- ; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
23192309; RV32-NEXT: vlse64.v v16, (a5), zero
23202310; RV32-NEXT: csrr a3, vlenb
23212311; RV32-NEXT: slli a3, a3, 3
23222312; RV32-NEXT: add a3, sp, a3
23232313; RV32-NEXT: addi a3, a3, 16
23242314; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
23252315; RV32-NEXT: lui a3, 4080
2326- ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
23272316; RV32-NEXT: vand.vx v24, v8, a3, v0.t
23282317; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
23292318; RV32-NEXT: addi a5, sp, 16
@@ -2528,9 +2517,7 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
25282517; RV32-NEXT: add a1, sp, a1
25292518; RV32-NEXT: addi a1, a1, 16
25302519; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
2531- ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
25322520; RV32-NEXT: vlse64.v v24, (a6), zero
2533- ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
25342521; RV32-NEXT: vsrl.vi v16, v8, 24
25352522; RV32-NEXT: vand.vx v16, v16, a5
25362523; RV32-NEXT: vsrl.vi v0, v8, 8
@@ -2704,15 +2691,13 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
27042691; RV32-NEXT: add a3, sp, a3
27052692; RV32-NEXT: addi a3, a3, 16
27062693; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
2707- ; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
27082694; RV32-NEXT: vlse64.v v16, (a5), zero
27092695; RV32-NEXT: csrr a3, vlenb
27102696; RV32-NEXT: slli a3, a3, 3
27112697; RV32-NEXT: add a3, sp, a3
27122698; RV32-NEXT: addi a3, a3, 16
27132699; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
27142700; RV32-NEXT: lui a3, 4080
2715- ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
27162701; RV32-NEXT: vand.vx v24, v8, a3, v0.t
27172702; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
27182703; RV32-NEXT: addi a5, sp, 16
@@ -2917,9 +2902,7 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
29172902; RV32-NEXT: add a1, sp, a1
29182903; RV32-NEXT: addi a1, a1, 16
29192904; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
2920- ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
29212905; RV32-NEXT: vlse64.v v24, (a6), zero
2922- ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
29232906; RV32-NEXT: vsrl.vi v16, v8, 24
29242907; RV32-NEXT: vand.vx v16, v16, a5
29252908; RV32-NEXT: vsrl.vi v0, v8, 8