@@ -247,34 +247,34 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT: csrr s1, vlenb
 ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 10
-; RV32-NEXT: vslidedown.vi v11, v8, 8
+; RV32-NEXT: vslidedown.vi v11, v8, 10
+; RV32-NEXT: vslidedown.vi v10, v8, 8
 ; RV32-NEXT: vslidedown.vi v9, v8, 2
 ; RV32-NEXT: srli s0, s1, 3
 ; RV32-NEXT: add a0, s0, s0
 ; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; RV32-NEXT: vslideup.vx v11, v10, s0
-; RV32-NEXT: vmv1r.v v10, v8
-; RV32-NEXT: vslideup.vx v10, v9, s0
+; RV32-NEXT: vslideup.vx v10, v11, s0
+; RV32-NEXT: vmv1r.v v11, v8
+; RV32-NEXT: vslideup.vx v11, v9, s0
 ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma
 ; RV32-NEXT: vslidedown.vi v9, v8, 12
 ; RV32-NEXT: srli a0, s1, 2
 ; RV32-NEXT: add a1, a0, s0
 ; RV32-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; RV32-NEXT: vslideup.vx v11, v9, a0
+; RV32-NEXT: vslideup.vx v10, v9, a0
 ; RV32-NEXT: csrr a2, vlenb
 ; RV32-NEXT: slli a2, a2, 1
 ; RV32-NEXT: add a2, sp, a2
 ; RV32-NEXT: addi a2, a2, 32
-; RV32-NEXT: vs1r.v v11, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill
 ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma
 ; RV32-NEXT: vslidedown.vi v9, v8, 4
 ; RV32-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; RV32-NEXT: vslideup.vx v10, v9, a0
+; RV32-NEXT: vslideup.vx v11, v9, a0
 ; RV32-NEXT: csrr a0, vlenb
 ; RV32-NEXT: add a0, sp, a0
 ; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs1r.v v11, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT: li a1, 3
 ; RV32-NEXT: mv a0, s0
 ; RV32-NEXT: call __mulsi3
@@ -338,34 +338,34 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV64-NEXT: csrr s1, vlenb
 ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 10
-; RV64-NEXT: vslidedown.vi v11, v8, 8
+; RV64-NEXT: vslidedown.vi v11, v8, 10
+; RV64-NEXT: vslidedown.vi v10, v8, 8
 ; RV64-NEXT: vslidedown.vi v9, v8, 2
 ; RV64-NEXT: srli s0, s1, 3
 ; RV64-NEXT: add a0, s0, s0
 ; RV64-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; RV64-NEXT: vslideup.vx v11, v10, s0
-; RV64-NEXT: vmv1r.v v10, v8
-; RV64-NEXT: vslideup.vx v10, v9, s0
+; RV64-NEXT: vslideup.vx v10, v11, s0
+; RV64-NEXT: vmv1r.v v11, v8
+; RV64-NEXT: vslideup.vx v11, v9, s0
 ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma
 ; RV64-NEXT: vslidedown.vi v9, v8, 12
 ; RV64-NEXT: srli a0, s1, 2
 ; RV64-NEXT: add a1, a0, s0
 ; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; RV64-NEXT: vslideup.vx v11, v9, a0
+; RV64-NEXT: vslideup.vx v10, v9, a0
 ; RV64-NEXT: csrr a2, vlenb
 ; RV64-NEXT: slli a2, a2, 1
 ; RV64-NEXT: add a2, sp, a2
 ; RV64-NEXT: addi a2, a2, 32
-; RV64-NEXT: vs1r.v v11, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill
 ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma
 ; RV64-NEXT: vslidedown.vi v9, v8, 4
 ; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; RV64-NEXT: vslideup.vx v10, v9, a0
+; RV64-NEXT: vslideup.vx v11, v9, a0
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: add a0, sp, a0
 ; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vs1r.v v11, (a0) # Unknown-size Folded Spill
 ; RV64-NEXT: li a1, 3
 ; RV64-NEXT: mv a0, s0
 ; RV64-NEXT: call __muldi3