Conversation

arsenm (Contributor) commented Nov 1, 2025

No description provided.

arsenm (Contributor, Author) commented Nov 1, 2025

Warning

This pull request is not mergeable via GitHub because a downstack PR is open. Once all requirements are satisfied, merge this PR as a stack on Graphite.

This stack of pull requests is managed by Graphite.

llvmbot (Member) commented Nov 1, 2025

@llvm/pr-subscribers-backend-risc-v
@llvm/pr-subscribers-backend-hexagon

@llvm/pr-subscribers-llvm-regalloc

Author: Matt Arsenault (arsenm)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/165961.diff

8 Files Affected:

  • (modified) llvm/lib/Target/RISCV/RISCVSubtarget.h (+1)
  • (modified) llvm/test/CodeGen/RISCV/branch-on-zero.ll (+6-10)
  • (modified) llvm/test/CodeGen/RISCV/machine-pipeliner.ll (+23-23)
  • (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll (+4-6)
  • (modified) llvm/test/CodeGen/RISCV/rvv/pr95865.ll (+21-22)
  • (modified) llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll (+33-33)
  • (modified) llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll (+14-14)
  • (modified) llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll (+12-12)
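
The only functional change in the diff below is the new `enableTerminalRule()` override in `llvm/lib/Target/RISCV/RISCVSubtarget.h`; the remaining hunks are regenerated CHECK lines in RISC-V codegen tests, mostly dropping an extra `mv` inside loops once the copies coalesce differently. Since the PR carries no description, here is a minimal, self-contained sketch of the opt-in pattern, under the assumption that the hook is a `TargetSubtargetInfo`-style virtual that defaults to false elsewhere in the stack and (given the llvm-regalloc subscriber tag) gates a register-coalescer heuristic; the class names below are illustrative, not LLVM's:

```cpp
#include <iostream>

// Model of the subtarget-hook pattern used by the one-line change in this PR:
// a virtual predicate on a subtarget-info base class defaults to "off", and a
// target opts in with an override. Only enableTerminalRule() matches the hook
// touched here; the default-false base implementation is an assumption.
struct SubtargetInfoModel {
  virtual ~SubtargetInfoModel() = default;
  // Assumed default: the heuristic stays disabled unless a target opts in.
  virtual bool enableTerminalRule() const { return false; }
};

struct RISCVLikeSubtarget : SubtargetInfoModel {
  // Mirrors the hunk in RISCVSubtarget.h.
  bool enableTerminalRule() const override { return true; }
};

int main() {
  RISCVLikeSubtarget ST;
  const SubtargetInfoModel &STI = ST;
  // A codegen pass (standing in for the register coalescer) would consult
  // the hook through the base interface like this.
  std::cout << std::boolalpha << STI.enableTerminalRule() << '\n'; // prints: true
  return 0;
}
```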
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 4b4fc8f0d8e76..817fb7cb5c41a 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -147,6 +147,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   }
 
   bool enableMachineScheduler() const override { return true; }
+  bool enableTerminalRule() const override { return true; }
 
   bool enablePostRAScheduler() const override { return UsePostRAScheduler; }
 
diff --git a/llvm/test/CodeGen/RISCV/branch-on-zero.ll b/llvm/test/CodeGen/RISCV/branch-on-zero.ll
index 02aeebdeb3775..2aec92eca145f 100644
--- a/llvm/test/CodeGen/RISCV/branch-on-zero.ll
+++ b/llvm/test/CodeGen/RISCV/branch-on-zero.ll
@@ -127,13 +127,11 @@ define i32 @test_lshr2(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
 ; RV32-NEXT:  .LBB3_2: # %while.body
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT:    lw a3, 0(a1)
-; RV32-NEXT:    addi a4, a1, 4
+; RV32-NEXT:    addi a1, a1, 4
 ; RV32-NEXT:    slli a3, a3, 1
-; RV32-NEXT:    addi a1, a0, 4
 ; RV32-NEXT:    sw a3, 0(a0)
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    mv a1, a4
-; RV32-NEXT:    bne a4, a2, .LBB3_2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    bne a1, a2, .LBB3_2
 ; RV32-NEXT:  .LBB3_3: # %while.end
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
@@ -151,13 +149,11 @@ define i32 @test_lshr2(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
 ; RV64-NEXT:  .LBB3_2: # %while.body
 ; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT:    lw a3, 0(a1)
-; RV64-NEXT:    addi a4, a1, 4
+; RV64-NEXT:    addi a1, a1, 4
 ; RV64-NEXT:    slli a3, a3, 1
-; RV64-NEXT:    addi a1, a0, 4
 ; RV64-NEXT:    sw a3, 0(a0)
-; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:    mv a1, a4
-; RV64-NEXT:    bne a4, a2, .LBB3_2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    bne a1, a2, .LBB3_2
 ; RV64-NEXT:  .LBB3_3: # %while.end
 ; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/machine-pipeliner.ll b/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
index d250098576687..a2a7da7e2d6ef 100644
--- a/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
@@ -54,37 +54,37 @@ define void @test_pipelined_1(ptr noalias %in, ptr noalias %out, i32 signext %cn
 ; CHECK-PIPELINED:       # %bb.0: # %entry
 ; CHECK-PIPELINED-NEXT:    blez a2, .LBB1_6
 ; CHECK-PIPELINED-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-PIPELINED-NEXT:    lw a4, 0(a1)
+; CHECK-PIPELINED-NEXT:    lw a7, 0(a1)
 ; CHECK-PIPELINED-NEXT:    addi a2, a2, -1
+; CHECK-PIPELINED-NEXT:    addi a3, a0, 4
+; CHECK-PIPELINED-NEXT:    addi a5, a1, 4
 ; CHECK-PIPELINED-NEXT:    sh2add.uw a6, a2, a1
-; CHECK-PIPELINED-NEXT:    addi a2, a0, 4
-; CHECK-PIPELINED-NEXT:    addi a1, a1, 4
 ; CHECK-PIPELINED-NEXT:    addi a6, a6, 4
-; CHECK-PIPELINED-NEXT:    beq a1, a6, .LBB1_5
+; CHECK-PIPELINED-NEXT:    beq a5, a6, .LBB1_5
 ; CHECK-PIPELINED-NEXT:  # %bb.2: # %for.body
-; CHECK-PIPELINED-NEXT:    lw a5, 0(a1)
-; CHECK-PIPELINED-NEXT:    addi a3, a2, 4
-; CHECK-PIPELINED-NEXT:    addi a4, a4, 1
-; CHECK-PIPELINED-NEXT:    addi a1, a1, 4
-; CHECK-PIPELINED-NEXT:    beq a1, a6, .LBB1_4
+; CHECK-PIPELINED-NEXT:    lw a1, 0(a5)
+; CHECK-PIPELINED-NEXT:    addi a4, a3, 4
+; CHECK-PIPELINED-NEXT:    addi a5, a5, 4
+; CHECK-PIPELINED-NEXT:    beq a5, a6, .LBB1_4
 ; CHECK-PIPELINED-NEXT:  .LBB1_3: # %for.body
 ; CHECK-PIPELINED-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-PIPELINED-NEXT:    sw a4, 0(a0)
-; CHECK-PIPELINED-NEXT:    mv a4, a5
-; CHECK-PIPELINED-NEXT:    lw a5, 0(a1)
-; CHECK-PIPELINED-NEXT:    mv a0, a2
-; CHECK-PIPELINED-NEXT:    mv a2, a3
-; CHECK-PIPELINED-NEXT:    addi a3, a3, 4
-; CHECK-PIPELINED-NEXT:    addi a4, a4, 1
-; CHECK-PIPELINED-NEXT:    addi a1, a1, 4
-; CHECK-PIPELINED-NEXT:    bne a1, a6, .LBB1_3
+; CHECK-PIPELINED-NEXT:    addi a2, a7, 1
+; CHECK-PIPELINED-NEXT:    mv a7, a1
+; CHECK-PIPELINED-NEXT:    lw a1, 0(a5)
+; CHECK-PIPELINED-NEXT:    sw a2, 0(a0)
+; CHECK-PIPELINED-NEXT:    mv a0, a3
+; CHECK-PIPELINED-NEXT:    mv a3, a4
+; CHECK-PIPELINED-NEXT:    addi a4, a4, 4
+; CHECK-PIPELINED-NEXT:    addi a5, a5, 4
+; CHECK-PIPELINED-NEXT:    bne a5, a6, .LBB1_3
 ; CHECK-PIPELINED-NEXT:  .LBB1_4:
-; CHECK-PIPELINED-NEXT:    sw a4, 0(a0)
-; CHECK-PIPELINED-NEXT:    mv a0, a2
-; CHECK-PIPELINED-NEXT:    mv a4, a5
+; CHECK-PIPELINED-NEXT:    addi a7, a7, 1
+; CHECK-PIPELINED-NEXT:    sw a7, 0(a0)
+; CHECK-PIPELINED-NEXT:    mv a0, a3
+; CHECK-PIPELINED-NEXT:    mv a7, a1
 ; CHECK-PIPELINED-NEXT:  .LBB1_5:
-; CHECK-PIPELINED-NEXT:    addi a4, a4, 1
-; CHECK-PIPELINED-NEXT:    sw a4, 0(a0)
+; CHECK-PIPELINED-NEXT:    addi a7, a7, 1
+; CHECK-PIPELINED-NEXT:    sw a7, 0(a0)
 ; CHECK-PIPELINED-NEXT:  .LBB1_6: # %for.end
 ; CHECK-PIPELINED-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 9c6d77dde1b5c..c3fe6b335d3da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -44,9 +44,8 @@ define <4 x i64> @m2_splat_with_tail(<4 x i64> %v1) vscale_range(2,2) {
 ; CHECK-LABEL: m2_splat_with_tail:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vrgather.vi v10, v8, 0
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vrgather.vi v8, v10, 0
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
   ret <4 x i64> %res
@@ -99,9 +98,8 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) {
 ; CHECK-LABEL: m2_splat_into_identity:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vrgather.vi v10, v8, 0
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vrgather.vi v8, v10, 0
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
   ret <4 x i64> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
index ab9849631663c..a4c793b49d54a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
@@ -36,7 +36,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    .cfi_offset s10, -96
 ; CHECK-NEXT:    .cfi_offset s11, -104
 ; CHECK-NEXT:    li a6, 0
-; CHECK-NEXT:    li s2, 8
+; CHECK-NEXT:    li a7, 8
 ; CHECK-NEXT:    li t0, 12
 ; CHECK-NEXT:    li s0, 4
 ; CHECK-NEXT:    li t1, 20
@@ -45,7 +45,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    andi t3, a4, 1
-; CHECK-NEXT:    li t2, 4
+; CHECK-NEXT:    li s2, 4
 ; CHECK-NEXT:  .LBB0_1: # %for.cond1.preheader.i
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-NEXT:    # Child Loop BB0_2 Depth 2
@@ -53,9 +53,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    # Child Loop BB0_4 Depth 4
 ; CHECK-NEXT:    # Child Loop BB0_5 Depth 5
 ; CHECK-NEXT:    mv t4, t1
-; CHECK-NEXT:    mv t5, t2
+; CHECK-NEXT:    mv t2, s2
 ; CHECK-NEXT:    mv t6, t0
-; CHECK-NEXT:    mv a7, s2
+; CHECK-NEXT:    mv s3, a7
 ; CHECK-NEXT:    mv s4, a6
 ; CHECK-NEXT:  .LBB0_2: # %for.cond5.preheader.i
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
@@ -64,9 +64,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    # Child Loop BB0_4 Depth 4
 ; CHECK-NEXT:    # Child Loop BB0_5 Depth 5
 ; CHECK-NEXT:    mv s5, t4
-; CHECK-NEXT:    mv s6, t5
+; CHECK-NEXT:    mv t5, t2
 ; CHECK-NEXT:    mv s7, t6
-; CHECK-NEXT:    mv s3, a7
+; CHECK-NEXT:    mv s8, s3
 ; CHECK-NEXT:    mv s9, s4
 ; CHECK-NEXT:  .LBB0_3: # %for.cond9.preheader.i
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
@@ -75,9 +75,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    # Child Loop BB0_4 Depth 4
 ; CHECK-NEXT:    # Child Loop BB0_5 Depth 5
 ; CHECK-NEXT:    mv s11, s5
-; CHECK-NEXT:    mv a3, s6
+; CHECK-NEXT:    mv s6, t5
 ; CHECK-NEXT:    mv ra, s7
-; CHECK-NEXT:    mv s8, s3
+; CHECK-NEXT:    mv a5, s8
 ; CHECK-NEXT:    mv s1, s9
 ; CHECK-NEXT:  .LBB0_4: # %vector.ph.i
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
@@ -92,45 +92,44 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=3
 ; CHECK-NEXT:    # Parent Loop BB0_4 Depth=4
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=5
-; CHECK-NEXT:    addi a5, a1, 4
-; CHECK-NEXT:    add a4, s8, a1
-; CHECK-NEXT:    add a1, a1, a3
+; CHECK-NEXT:    add a4, a5, a1
+; CHECK-NEXT:    add a3, s6, a1
+; CHECK-NEXT:    addi a1, a1, 4
 ; CHECK-NEXT:    vse32.v v8, (a4), v0.t
-; CHECK-NEXT:    vse32.v v8, (a1), v0.t
-; CHECK-NEXT:    mv a1, a5
-; CHECK-NEXT:    bne a5, s0, .LBB0_5
+; CHECK-NEXT:    vse32.v v8, (a3), v0.t
+; CHECK-NEXT:    bne a1, s0, .LBB0_5
 ; CHECK-NEXT:  # %bb.6: # %for.cond.cleanup15.i
 ; CHECK-NEXT:    # in Loop: Header=BB0_4 Depth=4
 ; CHECK-NEXT:    addi s1, s1, 4
-; CHECK-NEXT:    addi s8, s8, 4
+; CHECK-NEXT:    addi a5, a5, 4
 ; CHECK-NEXT:    addi ra, ra, 4
-; CHECK-NEXT:    addi a3, a3, 4
+; CHECK-NEXT:    addi s6, s6, 4
 ; CHECK-NEXT:    andi s10, a0, 1
 ; CHECK-NEXT:    addi s11, s11, 4
 ; CHECK-NEXT:    beqz s10, .LBB0_4
 ; CHECK-NEXT:  # %bb.7: # %for.cond.cleanup11.i
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=3
 ; CHECK-NEXT:    addi s9, s9, 4
-; CHECK-NEXT:    addi s3, s3, 4
+; CHECK-NEXT:    addi s8, s8, 4
 ; CHECK-NEXT:    addi s7, s7, 4
-; CHECK-NEXT:    addi s6, s6, 4
+; CHECK-NEXT:    addi t5, t5, 4
 ; CHECK-NEXT:    andi a1, a2, 1
 ; CHECK-NEXT:    addi s5, s5, 4
 ; CHECK-NEXT:    beqz a1, .LBB0_3
 ; CHECK-NEXT:  # %bb.8: # %for.cond.cleanup7.i
 ; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=2
 ; CHECK-NEXT:    addi s4, s4, 4
-; CHECK-NEXT:    addi a7, a7, 4
+; CHECK-NEXT:    addi s3, s3, 4
 ; CHECK-NEXT:    addi t6, t6, 4
-; CHECK-NEXT:    addi t5, t5, 4
+; CHECK-NEXT:    addi t2, t2, 4
 ; CHECK-NEXT:    addi t4, t4, 4
 ; CHECK-NEXT:    beqz t3, .LBB0_2
 ; CHECK-NEXT:  # %bb.9: # %for.cond.cleanup3.i
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    addi a6, a6, 4
-; CHECK-NEXT:    addi s2, s2, 4
+; CHECK-NEXT:    addi a7, a7, 4
 ; CHECK-NEXT:    addi t0, t0, 4
-; CHECK-NEXT:    addi t2, t2, 4
+; CHECK-NEXT:    addi s2, s2, 4
 ; CHECK-NEXT:    addi t1, t1, 4
 ; CHECK-NEXT:    beqz a1, .LBB0_1
 ; CHECK-NEXT:  # %bb.10: # %l.exit
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index f295bd8d74df3..386c736128794 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -2258,18 +2258,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
 ; CHECK-RV32-NEXT:    vsetvli a7, zero, e32, m2, ta, ma
 ; CHECK-RV32-NEXT:  .LBB98_3: # %vector.body
 ; CHECK-RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-RV32-NEXT:    slli a7, a6, 2
-; CHECK-RV32-NEXT:    add t0, a6, a4
-; CHECK-RV32-NEXT:    add a7, a0, a7
-; CHECK-RV32-NEXT:    vl2re32.v v8, (a7)
-; CHECK-RV32-NEXT:    sltu a6, t0, a6
-; CHECK-RV32-NEXT:    add a5, a5, a6
-; CHECK-RV32-NEXT:    xor a6, t0, a3
+; CHECK-RV32-NEXT:    mv a7, a6
+; CHECK-RV32-NEXT:    slli t0, a6, 2
+; CHECK-RV32-NEXT:    add a6, a6, a4
+; CHECK-RV32-NEXT:    add t0, a0, t0
+; CHECK-RV32-NEXT:    vl2re32.v v8, (t0)
+; CHECK-RV32-NEXT:    sltu a7, a6, a7
+; CHECK-RV32-NEXT:    add a5, a5, a7
+; CHECK-RV32-NEXT:    xor a7, a6, a3
 ; CHECK-RV32-NEXT:    vand.vx v8, v8, a1
-; CHECK-RV32-NEXT:    or t1, a6, a5
-; CHECK-RV32-NEXT:    vs2r.v v8, (a7)
-; CHECK-RV32-NEXT:    mv a6, t0
-; CHECK-RV32-NEXT:    bnez t1, .LBB98_3
+; CHECK-RV32-NEXT:    or a7, a7, a5
+; CHECK-RV32-NEXT:    vs2r.v v8, (t0)
+; CHECK-RV32-NEXT:    bnez a7, .LBB98_3
 ; CHECK-RV32-NEXT:  # %bb.4: # %middle.block
 ; CHECK-RV32-NEXT:    bnez a3, .LBB98_6
 ; CHECK-RV32-NEXT:  .LBB98_5: # %for.body
@@ -2350,18 +2350,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
 ; CHECK-ZVKB-NOZBB32-NEXT:    vsetvli a7, zero, e32, m2, ta, ma
 ; CHECK-ZVKB-NOZBB32-NEXT:  .LBB98_3: # %vector.body
 ; CHECK-ZVKB-NOZBB32-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-NOZBB32-NEXT:    slli a7, a6, 2
-; CHECK-ZVKB-NOZBB32-NEXT:    add t0, a6, a4
-; CHECK-ZVKB-NOZBB32-NEXT:    add a7, a0, a7
-; CHECK-ZVKB-NOZBB32-NEXT:    vl2re32.v v8, (a7)
-; CHECK-ZVKB-NOZBB32-NEXT:    sltu a6, t0, a6
-; CHECK-ZVKB-NOZBB32-NEXT:    add a5, a5, a6
-; CHECK-ZVKB-NOZBB32-NEXT:    xor a6, t0, a3
+; CHECK-ZVKB-NOZBB32-NEXT:    mv a7, a6
+; CHECK-ZVKB-NOZBB32-NEXT:    slli t0, a6, 2
+; CHECK-ZVKB-NOZBB32-NEXT:    add a6, a6, a4
+; CHECK-ZVKB-NOZBB32-NEXT:    add t0, a0, t0
+; CHECK-ZVKB-NOZBB32-NEXT:    vl2re32.v v8, (t0)
+; CHECK-ZVKB-NOZBB32-NEXT:    sltu a7, a6, a7
+; CHECK-ZVKB-NOZBB32-NEXT:    add a5, a5, a7
+; CHECK-ZVKB-NOZBB32-NEXT:    xor a7, a6, a3
 ; CHECK-ZVKB-NOZBB32-NEXT:    vandn.vx v8, v8, a1
-; CHECK-ZVKB-NOZBB32-NEXT:    or t1, a6, a5
-; CHECK-ZVKB-NOZBB32-NEXT:    vs2r.v v8, (a7)
-; CHECK-ZVKB-NOZBB32-NEXT:    mv a6, t0
-; CHECK-ZVKB-NOZBB32-NEXT:    bnez t1, .LBB98_3
+; CHECK-ZVKB-NOZBB32-NEXT:    or a7, a7, a5
+; CHECK-ZVKB-NOZBB32-NEXT:    vs2r.v v8, (t0)
+; CHECK-ZVKB-NOZBB32-NEXT:    bnez a7, .LBB98_3
 ; CHECK-ZVKB-NOZBB32-NEXT:  # %bb.4: # %middle.block
 ; CHECK-ZVKB-NOZBB32-NEXT:    bnez a3, .LBB98_7
 ; CHECK-ZVKB-NOZBB32-NEXT:  .LBB98_5: # %for.body.preheader
@@ -2444,18 +2444,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
 ; CHECK-ZVKB-ZBB32-NEXT:    vsetvli a7, zero, e32, m2, ta, ma
 ; CHECK-ZVKB-ZBB32-NEXT:  .LBB98_3: # %vector.body
 ; CHECK-ZVKB-ZBB32-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-ZBB32-NEXT:    slli a7, a6, 2
-; CHECK-ZVKB-ZBB32-NEXT:    add t0, a6, a4
-; CHECK-ZVKB-ZBB32-NEXT:    add a7, a0, a7
-; CHECK-ZVKB-ZBB32-NEXT:    vl2re32.v v8, (a7)
-; CHECK-ZVKB-ZBB32-NEXT:    sltu a6, t0, a6
-; CHECK-ZVKB-ZBB32-NEXT:    add a5, a5, a6
-; CHECK-ZVKB-ZBB32-NEXT:    xor a6, t0, a3
+; CHECK-ZVKB-ZBB32-NEXT:    mv a7, a6
+; CHECK-ZVKB-ZBB32-NEXT:    slli t0, a6, 2
+; CHECK-ZVKB-ZBB32-NEXT:    add a6, a6, a4
+; CHECK-ZVKB-ZBB32-NEXT:    add t0, a0, t0
+; CHECK-ZVKB-ZBB32-NEXT:    vl2re32.v v8, (t0)
+; CHECK-ZVKB-ZBB32-NEXT:    sltu a7, a6, a7
+; CHECK-ZVKB-ZBB32-NEXT:    add a5, a5, a7
+; CHECK-ZVKB-ZBB32-NEXT:    xor a7, a6, a3
 ; CHECK-ZVKB-ZBB32-NEXT:    vandn.vx v8, v8, a1
-; CHECK-ZVKB-ZBB32-NEXT:    or t1, a6, a5
-; CHECK-ZVKB-ZBB32-NEXT:    vs2r.v v8, (a7)
-; CHECK-ZVKB-ZBB32-NEXT:    mv a6, t0
-; CHECK-ZVKB-ZBB32-NEXT:    bnez t1, .LBB98_3
+; CHECK-ZVKB-ZBB32-NEXT:    or a7, a7, a5
+; CHECK-ZVKB-ZBB32-NEXT:    vs2r.v v8, (t0)
+; CHECK-ZVKB-ZBB32-NEXT:    bnez a7, .LBB98_3
 ; CHECK-ZVKB-ZBB32-NEXT:  # %bb.4: # %middle.block
 ; CHECK-ZVKB-ZBB32-NEXT:    bnez a3, .LBB98_6
 ; CHECK-ZVKB-ZBB32-NEXT:  .LBB98_5: # %for.body
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
index ed6b7f1e6efb8..10440089cff10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
@@ -25,24 +25,24 @@ define dso_local void @test_store1(ptr nocapture noundef writeonly %dst, ptr noc
 ; RV32-NEXT:    li a6, 0
 ; RV32-NEXT:  .LBB0_4: # %vector.body
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    slli t0, a7, 2
-; RV32-NEXT:    addi t1, a7, 8
-; RV32-NEXT:    add t0, a1, t0
+; RV32-NEXT:    mv t0, a7
+; RV32-NEXT:    slli t1, a7, 2
+; RV32-NEXT:    addi a7, a7, 8
+; RV32-NEXT:    add t1, a1, t1
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vle32.v v8, (t0)
-; RV32-NEXT:    sltu a7, t1, a7
-; RV32-NEXT:    xor t0, t1, a5
-; RV32-NEXT:    add a6, a6, a7
+; RV32-NEXT:    vle32.v v8, (t1)
+; RV32-NEXT:    sltu t0, a7, t0
+; RV32-NEXT:    xor t1, a7, a5
+; RV32-NEXT:    add a6, a6, t0
 ; RV32-NEXT:    vmslt.vx v12, v8, a2
 ; RV32-NEXT:    vcompress.vm v10, v8, v12
-; RV32-NEXT:    vcpop.m a7, v12
-; RV32-NEXT:    vsetvli zero, a7, e32, m2, ta, ma
+; RV32-NEXT:    vcpop.m t0, v12
+; RV32-NEXT:    vsetvli zero, t0, e32, m2, ta, ma
 ; RV32-NEXT:    vse32.v v10, (a0)
-; RV32-NEXT:    slli a7, a7, 2
-; RV32-NEXT:    or t0, t0, a6
-; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    mv a7, t1
-; RV32-NEXT:    bnez t0, .LBB0_4
+; RV32-NEXT:    slli t0, t0, 2
+; RV32-NEXT:    or t1, t1, a6
+; RV32-NEXT:    add a0, a0, t0
+; RV32-NEXT:    bnez t1, .LBB0_4
 ; RV32-NEXT:  # %bb.5: # %middle.block
 ; RV32-NEXT:    bne a5, a3, .LBB0_9
 ; RV32-NEXT:  .LBB0_6: # %for.cond.cleanup
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
index ead79fcf53d8b..af3b0852a6461 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
@@ -102,20 +102,20 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
 ; RV32-NEXT:  .LBB0_13: # %vector.body
 ; RV32-NEXT:    # Parent Loop BB0_10 Depth=1
 ; RV32-NEXT:    # => This Inner Loop Header: Depth=2
-; RV32-NEXT:    add s0, a2, t6
-; RV32-NEXT:    add s1, a4, t6
-; RV32-NEXT:    vl2r.v v8, (s0)
-; RV32-NEXT:    add s0, a0, t6
+; RV32-NEXT:    mv s0, t6
+; RV32-NEXT:    add t6, a2, t6
+; RV32-NEXT:    add s1, a4, s0
+; RV32-NEXT:    vl2r.v v8, (t6)
+; RV32-NEXT:    add s2, a0, s0
 ; RV32-NEXT:    vl2r.v v10, (s1)
-; RV32-NEXT:    add s1, t6, t2
-; RV32-NEXT:    sltu t6, s1, t6
-; RV32-NEXT:    add t5, t5, t6
-; RV32-NEXT:    xor t6, s1, t4
+; RV32-NEXT:    add t6, s0, t2
+; RV32-NEXT:    sltu s0, t6, s0
+; RV32-NEXT:    add t5, t5, s0
+; RV32-NEXT:    xor s0, t6, t4
 ; RV32-NEXT:    vaaddu.vv v8, v8, v10
-; RV32-NEXT:    or s2, t6, t5
-; RV32-NEXT:    vs2r.v v8, (s0)
-; RV32-NEXT:    mv t6, s1
-; RV32-NEXT:    bnez s2, .LBB0_13
+; RV32-NEXT:    or s0, s0, t5
+; RV32-NEXT:    vs2r.v v8, (s2)
+; RV32-NEXT:    bnez s0, .LBB0_13
 ; RV32-NEXT:  # %bb.14: # %middle.block
 ; RV32-NEXT:    # in Loop: Header=BB0_10 Depth=1
 ; RV32-NEXT:    beq t4, a6, .LBB0_9
