Skip to content

Conversation

@arsenm
Copy link
Contributor

@arsenm arsenm commented Feb 13, 2025

This reverts commit d246cc6. We now handle
composing subregister extracts through reg_sequence.

Copy link
Contributor Author

arsenm commented Feb 13, 2025

@arsenm arsenm marked this pull request as ready for review February 13, 2025 12:28
@llvmbot
Copy link
Member

llvmbot commented Feb 13, 2025

@llvm/pr-subscribers-llvm-globalisel
@llvm/pr-subscribers-backend-amdgpu
@llvm/pr-subscribers-llvm-regalloc

@llvm/pr-subscribers-tablegen

Author: Matt Arsenault (arsenm)

Changes

This reverts commit d246cc6. We now handle
composing subregister extracts through reg_sequence.


Patch is 576.74 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/127052.diff

61 Files Affected:

  • (modified) llvm/lib/CodeGen/PeepholeOptimizer.cpp (-6)
  • (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll (+112-112)
  • (modified) llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll (+28-28)
  • (modified) llvm/test/CodeGen/AMDGPU/call-argument-types.ll (+24-39)
  • (modified) llvm/test/CodeGen/AMDGPU/calling-conventions.ll (+110-117)
  • (modified) llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll (+36-36)
  • (modified) llvm/test/CodeGen/AMDGPU/ctpop64.ll (+18-18)
  • (modified) llvm/test/CodeGen/AMDGPU/div_v2i128.ll (+10-10)
  • (modified) llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll (+5-8)
  • (modified) llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll (+31-27)
  • (modified) llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll (+33-29)
  • (modified) llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll (+33-29)
  • (modified) llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll (+61-53)
  • (modified) llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll (+140-124)
  • (modified) llvm/test/CodeGen/AMDGPU/fptoi.i128.ll (+178-178)
  • (modified) llvm/test/CodeGen/AMDGPU/fptrunc.ll (+37-37)
  • (modified) llvm/test/CodeGen/AMDGPU/function-args.ll (+36-55)
  • (modified) llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll (+171-298)
  • (modified) llvm/test/CodeGen/AMDGPU/idot4s.ll (+62-62)
  • (modified) llvm/test/CodeGen/AMDGPU/idot4u.ll (+155-155)
  • (modified) llvm/test/CodeGen/AMDGPU/idot8u.ll (+18-18)
  • (modified) llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll (+44-44)
  • (modified) llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll (+150-148)
  • (modified) llvm/test/CodeGen/AMDGPU/kernel-args.ll (+3-6)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll (+440-440)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll (+81-81)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.exp.ll (+16-16)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.exp10.ll (+16-16)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll (+52-53)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.set.rounding.ll (+2-4)
  • (modified) llvm/test/CodeGen/AMDGPU/load-constant-i16.ll (+62-62)
  • (modified) llvm/test/CodeGen/AMDGPU/load-constant-i32.ll (+16-16)
  • (modified) llvm/test/CodeGen/AMDGPU/load-global-i16.ll (+29-29)
  • (modified) llvm/test/CodeGen/AMDGPU/load-global-i32.ll (+156-210)
  • (modified) llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw-system.ll (+10-9)
  • (modified) llvm/test/CodeGen/AMDGPU/mul.ll (+25-25)
  • (modified) llvm/test/CodeGen/AMDGPU/mul_int24.ll (+15-18)
  • (modified) llvm/test/CodeGen/AMDGPU/select.f16.ll (+38-38)
  • (modified) llvm/test/CodeGen/AMDGPU/shl.ll (+11-11)
  • (modified) llvm/test/CodeGen/AMDGPU/shufflevector.v2i64.v8i64.ll (+304-304)
  • (modified) llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll (+109-118)
  • (modified) llvm/test/CodeGen/AMDGPU/spill-vgpr.ll (+8-8)
  • (modified) llvm/test/CodeGen/AMDGPU/sra.ll (+11-11)
  • (modified) llvm/test/CodeGen/AMDGPU/srl.ll (+11-11)
  • (modified) llvm/test/CodeGen/AMDGPU/udiv.ll (+2-4)
  • (modified) llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll (+21-21)
  • (modified) llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll (+1-9)
  • (modified) llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll (+59-59)
  • (modified) llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll (+66-81)
  • (modified) llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll (+26-32)
  • (modified) llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll (+27-27)
  • (modified) llvm/test/CodeGen/Thumb2/mve-shuffle.ll (+53-90)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vabdus.ll (+25-25)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vld2.ll (+9-9)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vld3.ll (+70-66)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vld4.ll (+33-33)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vldst4.ll (+55-55)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vst2.ll (+18-18)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vst3.ll (+214-209)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vst4-post.ll (+20-20)
  • (modified) llvm/test/CodeGen/Thumb2/mve-vst4.ll (+69-69)
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 24bd9938bc45c..5416cdd39aaf3 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -421,12 +421,6 @@ class RegSequenceRewriter : public Rewriter {
   }
 
   bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
-    // Do not introduce new subregister uses in a reg_sequence. Until composing
-    // subregister indices is supported while folding, we're just blocking
-    // folding of subregister copies later in the function.
-    if (NewSubReg)
-      return false;
-
     MachineOperand &MO = CopyLike.getOperand(CurrentSrcIdx);
     MO.setReg(NewReg);
     MO.setSubReg(NewSubReg);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll
index d41601cc0d76e..40f29c56c8f12 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll
@@ -1635,7 +1635,6 @@ define amdgpu_kernel void @sdivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v4, v1, vcc
 ; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s16, v3, 0
-; GFX9-NEXT:    v_mov_b32_e32 v7, s11
 ; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], s16, v4, v[1:2]
 ; GFX9-NEXT:    v_mul_hi_u32 v6, v3, v0
 ; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], s17, v3, v[1:2]
@@ -1683,32 +1682,33 @@ define amdgpu_kernel void @sdivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s8, v5, 0
 ; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; GFX9-NEXT:    v_add_u32_e32 v3, v4, v3
-; GFX9-NEXT:    v_add3_u32 v6, v3, v2, v6
-; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], s8, v6, v[1:2]
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s10, v0
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], s9, v5, v[1:2]
+; GFX9-NEXT:    v_add3_u32 v3, v3, v2, v6
+; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], s8, v3, v[1:2]
+; GFX9-NEXT:    v_mov_b32_e32 v6, s11
+; GFX9-NEXT:    v_sub_co_u32_e32 v7, vcc, s10, v0
+; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], s9, v5, v[1:2]
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s9
 ; GFX9-NEXT:    s_ashr_i32 s10, s3, 31
-; GFX9-NEXT:    v_subb_co_u32_e64 v1, s[0:1], v7, v2, vcc
-; GFX9-NEXT:    v_sub_u32_e32 v2, s11, v2
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v1
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[0:1]
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v0
-; GFX9-NEXT:    v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v1
-; GFX9-NEXT:    v_subrev_co_u32_e32 v8, vcc, s8, v0
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v3, v7, s[0:1]
-; GFX9-NEXT:    v_subbrev_co_u32_e64 v9, s[0:1], 0, v2, vcc
-; GFX9-NEXT:    v_add_co_u32_e64 v10, s[0:1], 1, v5
-; GFX9-NEXT:    v_addc_co_u32_e64 v11, s[0:1], 0, v6, s[0:1]
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v9
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[0:1]
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v8
+; GFX9-NEXT:    v_subb_co_u32_e64 v6, s[0:1], v6, v1, vcc
+; GFX9-NEXT:    v_sub_u32_e32 v0, s11, v1
+; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v6
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
+; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v7
+; GFX9-NEXT:    v_subb_co_u32_e32 v0, vcc, v0, v4, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[0:1]
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v6
+; GFX9-NEXT:    v_subrev_co_u32_e32 v9, vcc, s8, v7
+; GFX9-NEXT:    v_cndmask_b32_e64 v8, v1, v2, s[0:1]
+; GFX9-NEXT:    v_subbrev_co_u32_e64 v10, s[0:1], 0, v0, vcc
+; GFX9-NEXT:    v_add_co_u32_e64 v2, s[0:1], 1, v5
+; GFX9-NEXT:    v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1]
+; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v10
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
+; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v9
 ; GFX9-NEXT:    v_cndmask_b32_e64 v12, 0, -1, s[0:1]
-; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v9
-; GFX9-NEXT:    v_cndmask_b32_e64 v12, v3, v12, s[0:1]
-; GFX9-NEXT:    v_add_co_u32_e64 v13, s[0:1], 1, v10
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v10
+; GFX9-NEXT:    v_cndmask_b32_e64 v12, v1, v12, s[0:1]
+; GFX9-NEXT:    v_add_co_u32_e64 v13, s[0:1], 1, v2
 ; GFX9-NEXT:    v_addc_co_u32_e64 v14, s[0:1], 0, v11, s[0:1]
 ; GFX9-NEXT:    s_add_u32 s0, s18, s6
 ; GFX9-NEXT:    s_addc_u32 s1, s19, s6
@@ -1716,116 +1716,116 @@ define amdgpu_kernel void @sdivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX9-NEXT:    s_mov_b32 s11, s10
 ; GFX9-NEXT:    s_addc_u32 s3, s3, s10
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[10:11]
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s3
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v15, s2
-; GFX9-NEXT:    v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
-; GFX9-NEXT:    v_mul_f32_e32 v3, 0x4f800000, v3
-; GFX9-NEXT:    v_add_f32_e32 v3, v3, v15
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v3
-; GFX9-NEXT:    v_subrev_co_u32_e32 v15, vcc, s8, v8
-; GFX9-NEXT:    v_subbrev_co_u32_e32 v16, vcc, 0, v2, vcc
-; GFX9-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v3
-; GFX9-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
-; GFX9-NEXT:    v_trunc_f32_e32 v4, v3
-; GFX9-NEXT:    v_mul_f32_e32 v3, 0xcf800000, v4
-; GFX9-NEXT:    v_add_f32_e32 v2, v3, v2
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v17, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v0, vcc, v0, v4, vcc
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f800000, v1
+; GFX9-NEXT:    v_add_f32_e32 v1, v1, v15
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; GFX9-NEXT:    v_subrev_co_u32_e32 v4, vcc, s8, v9
+; GFX9-NEXT:    v_subbrev_co_u32_e32 v15, vcc, 0, v0, vcc
+; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v1
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GFX9-NEXT:    v_trunc_f32_e32 v16, v1
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0xcf800000, v16
+; GFX9-NEXT:    v_add_f32_e32 v0, v1, v0
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v17, v0
 ; GFX9-NEXT:    s_xor_b64 s[8:9], s[0:1], s[6:7]
 ; GFX9-NEXT:    s_sub_u32 s5, 0, s2
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], s5, v17, 0
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v12, v4
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s5, v17, 0
+; GFX9-NEXT:    v_cndmask_b32_e32 v12, v2, v13, vcc
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v13, v16
 ; GFX9-NEXT:    s_subb_u32 s20, 0, s3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v11, v11, v14, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v10, v10, v13, vcc
-; GFX9-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], s5, v12, v[3:4]
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v7
-; GFX9-NEXT:    v_mul_lo_u32 v7, v12, v2
-; GFX9-NEXT:    v_mad_u64_u32 v[3:4], s[18:19], s20, v17, v[3:4]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v6, v11, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e32 v6, v8, v15, vcc
-; GFX9-NEXT:    v_mul_lo_u32 v8, v17, v3
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v5, v10, s[0:1]
-; GFX9-NEXT:    v_mul_hi_u32 v10, v17, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v9, v9, v16, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v7, v8
-; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v7, v10
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX9-NEXT:    v_mul_lo_u32 v10, v12, v3
-; GFX9-NEXT:    v_mul_hi_u32 v2, v12, v2
-; GFX9-NEXT:    v_add_u32_e32 v7, v8, v7
-; GFX9-NEXT:    v_mul_hi_u32 v8, v17, v3
-; GFX9-NEXT:    v_mul_hi_u32 v3, v12, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v10, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v9, v4, vcc
+; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], s5, v13, v[1:2]
+; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v8
+; GFX9-NEXT:    v_cndmask_b32_e64 v8, v3, v11, s[0:1]
+; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[18:19], s20, v17, v[1:2]
+; GFX9-NEXT:    v_mul_lo_u32 v2, v13, v0
+; GFX9-NEXT:    v_cndmask_b32_e32 v9, v10, v15, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v3, v17, v1
+; GFX9-NEXT:    v_mul_hi_u32 v10, v17, v0
+; GFX9-NEXT:    v_mul_hi_u32 v0, v13, v0
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v5, v12, s[0:1]
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v3
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v10
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v10, v13, v1
+; GFX9-NEXT:    v_add_u32_e32 v2, v3, v2
+; GFX9-NEXT:    v_mul_hi_u32 v3, v17, v1
+; GFX9-NEXT:    v_mul_hi_u32 v1, v13, v1
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v10, v0
 ; GFX9-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v8
-; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v7
-; GFX9-NEXT:    v_add_u32_e32 v8, v10, v8
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX9-NEXT:    v_add3_u32 v3, v8, v7, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v17, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v12, v3, vcc
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[18:19], s5, v7, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v0, v6, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v9, v1, v9, s[0:1]
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v3
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_add_u32_e32 v3, v10, v3
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v17, v0
+; GFX9-NEXT:    v_add3_u32 v1, v3, v2, v1
+; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[18:19], s5, v10, 0
+; GFX9-NEXT:    v_addc_co_u32_e32 v11, vcc, v13, v1, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v3
-; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s5, v8, v[0:1]
-; GFX9-NEXT:    v_xor_b32_e32 v10, s17, v4
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v7, v4, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v6, v9, s[0:1]
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s5, v11, v[0:1]
 ; GFX9-NEXT:    v_xor_b32_e32 v5, s16, v5
-; GFX9-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], s20, v7, v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v11, s17
+; GFX9-NEXT:    v_xor_b32_e32 v8, s17, v8
+; GFX9-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], s20, v10, v[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v9, s17
 ; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s16, v5
-; GFX9-NEXT:    v_xor_b32_e32 v4, s4, v6
-; GFX9-NEXT:    v_mul_lo_u32 v5, v8, v2
-; GFX9-NEXT:    v_mul_lo_u32 v6, v7, v3
-; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v10, v11, vcc
-; GFX9-NEXT:    v_mul_hi_u32 v10, v7, v2
-; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v6
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v10
+; GFX9-NEXT:    v_xor_b32_e32 v4, s4, v7
+; GFX9-NEXT:    v_mul_lo_u32 v5, v11, v2
+; GFX9-NEXT:    v_mul_lo_u32 v7, v10, v3
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v8, v9, vcc
+; GFX9-NEXT:    v_mul_hi_u32 v8, v10, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v7
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v8
 ; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; GFX9-NEXT:    v_mul_lo_u32 v10, v8, v3
-; GFX9-NEXT:    v_mul_hi_u32 v2, v8, v2
-; GFX9-NEXT:    v_add_u32_e32 v5, v6, v5
-; GFX9-NEXT:    v_mul_hi_u32 v6, v7, v3
-; GFX9-NEXT:    v_mul_hi_u32 v3, v8, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v10, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v6
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v8, v11, v3
+; GFX9-NEXT:    v_mul_hi_u32 v2, v11, v2
+; GFX9-NEXT:    v_add_u32_e32 v5, v7, v5
+; GFX9-NEXT:    v_mul_hi_u32 v7, v10, v3
+; GFX9-NEXT:    v_mul_hi_u32 v3, v11, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v8, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v7
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v5
-; GFX9-NEXT:    v_add_u32_e32 v6, v10, v6
+; GFX9-NEXT:    v_add_u32_e32 v7, v8, v7
 ; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; GFX9-NEXT:    v_add3_u32 v3, v6, v5, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v7, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v8, v3, vcc
+; GFX9-NEXT:    v_add3_u32 v3, v7, v5, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v10, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v11, v3, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v5, s9, v2
-; GFX9-NEXT:    v_mul_lo_u32 v6, s8, v3
-; GFX9-NEXT:    v_mul_hi_u32 v8, s8, v2
+; GFX9-NEXT:    v_mul_lo_u32 v7, s8, v3
+; GFX9-NEXT:    v_mul_hi_u32 v9, s8, v2
 ; GFX9-NEXT:    v_mul_hi_u32 v2, s9, v2
 ; GFX9-NEXT:    v_mul_hi_u32 v12, s9, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v6
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v8
+; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v7
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v9
 ; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; GFX9-NEXT:    v_mul_lo_u32 v8, s9, v3
-; GFX9-NEXT:    v_add_u32_e32 v5, v6, v5
-; GFX9-NEXT:    v_mul_hi_u32 v6, s8, v3
-; GFX9-NEXT:    v_xor_b32_e32 v9, s4, v9
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v8, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v6
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v9, s9, v3
+; GFX9-NEXT:    v_add_u32_e32 v5, v7, v5
+; GFX9-NEXT:    v_mul_hi_u32 v7, s8, v3
+; GFX9-NEXT:    v_xor_b32_e32 v6, s4, v6
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v9, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v7
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v2, v5
 ; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], s2, v10, 0
+; GFX9-NEXT:    v_mov_b32_e32 v8, s4
 ; GFX9-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; GFX9-NEXT:    v_add_u32_e32 v6, v8, v6
-; GFX9-NEXT:    v_mov_b32_e32 v7, s4
 ; GFX9-NEXT:    v_subrev_co_u32_e32 v4, vcc, s4, v4
+; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v6, v8, vcc
+; GFX9-NEXT:    v_add_u32_e32 v6, v9, v7
 ; GFX9-NEXT:    v_add3_u32 v8, v6, v11, v12
-; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v9, v7, vcc
 ; GFX9-NEXT:    v_mad_u64_u32 v[6:7], s[0:1], s2, v8, v[3:4]
 ; GFX9-NEXT:    v_mov_b32_e32 v9, s9
 ; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, s8, v2
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 8bb8ecb079a34..bc89a186db010 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -2218,31 +2218,31 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
 ; GFX1264-NEXT:    s_clause 0x1
 ; GFX1264-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX1264-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1264-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1264-NEXT:    s_mov_b32 s11, 0
-; GFX1264-NEXT:    v_mbcnt_lo_u32_b32 v0, s6, 0
 ; GFX1264-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1264-NEXT:    s_mov_b32 s11, 0
+; GFX1264-NEXT:    v_mbcnt_lo_u32_b32 v0, s8, 0
+; GFX1264-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1264-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1264-NEXT:    v_mbcnt_hi_u32_b32 v2, s7, v0
+; GFX1264-NEXT:    v_mbcnt_hi_u32_b32 v2, s9, v0
 ; GFX1264-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; GFX1264-NEXT:    v_cmpx_eq_u32_e32 0, v2
 ; GFX1264-NEXT:    s_cbranch_execz .LBB4_2
 ; GFX1264-NEXT:  ; %bb.1:
-; GFX1264-NEXT:    s_bcnt1_i32_b64 s10, s[6:7]
-; GFX1264-NEXT:    s_mov_b32 s15, 0x31016000
+; GFX1264-NEXT:    s_bcnt1_i32_b64 s10, s[8:9]
 ; GFX1264-NEXT:    s_wait_kmcnt 0x0
-; GFX1264-NEXT:    s_mul_u64 s[6:7], s[4:5], s[10:11]
-; GFX1264-NEXT:    s_mov_b32 s14, -1
+; GFX1264-NEXT:    s_mul_u64 s[8:9], s[4:5], s[10:11]
+; GFX1264-NEXT:    s_mov_b32 s11, 0x31016000
 ; GFX1264-NEXT:    s_wait_alu 0xfffe
-; GFX1264-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1264-NEXT:    v_mov_b32_e32 v1, s7
-; GFX1264-NEXT:    s_mov_b32 s12, s2
-; GFX1264-NEXT:    s_mov_b32 s13, s3
-; GFX1264-NEXT:    buffer_atomic_add_u64 v[0:1], off, s[12:15], null th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1264-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1264-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1264-NEXT:    s_mov_b32 s10, -1
+; GFX1264-NEXT:    s_mov_b32 s8, s2
+; GFX1264-NEXT:    s_mov_b32 s9, s3
+; GFX1264-NEXT:    buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_DEV
 ; GFX1264-NEXT:    s_wait_loadcnt 0x0
 ; GFX1264-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1264-NEXT:  .LBB4_2:
-; GFX1264-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GFX1264-NEXT:    s_or_b64 exec, exec, s[6:7]
 ; GFX1264-NEXT:    s_wait_kmcnt 0x0
 ; GFX1264-NEXT:    v_readfirstlane_b32 s3, v1
 ; GFX1264-NEXT:    v_readfirstlane_b32 s2, v0
@@ -5800,31 +5800,31 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
 ; GFX1264-NEXT:    s_clause 0x1
 ; GFX1264-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX1264-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1264-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1264-NEXT:    s_mov_b32 s11, 0
-; GFX1264-NEXT:    v_mbcnt_lo_u32_b32 v0, s6, 0
 ; GFX1264-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1264-NEXT:    s_mov_b32 s11, 0
+; GFX1264-NEXT:    v_mbcnt_lo_u32_b32 v0, s8, 0
+; GFX1264-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1264-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1264-NEXT:    v_mbcnt_hi_u32_b32 v2, s7, v0
+; GFX1264-NEXT:    v_mbcnt_hi_u32_b32 v2, s9, v0
 ; GFX1264-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; GFX1264-NEXT:    v_cmpx_eq_u32_e32 0, v2
 ; GFX1264-NEXT:    s_cbranch_execz .LBB10_2
 ; GFX1264-NEXT:  ; %bb.1:
-; GFX1264-NEXT:    s_bcnt1_i32_b64 s10, s[6:7]
-; GFX1264-NEXT:    s_mov_b32 s15, 0x31016000
+; GFX1264-NEXT:    s_bcnt1_i32_b64 s10, s[8:9]
 ; GFX1264-NEXT:    s_wait_kmcnt 0x0
-; GFX1264-NEXT:    s_mul_u64 s[6:7], s[4:5], s[10:11]
-; GFX1264-NEXT:    s_mov_b32 s14, -1
+; GFX1264-NEXT:    s_mul_u64 s[8:9], s[4:5], s[10:11]
+; GFX1264-NEXT:    s_mov_b32 s11, 0x31016000
 ; GFX1264-NEXT:    s_wait_alu 0xfffe
-; GFX1264-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1264-NEXT:    v_mov_b32_e32 v1, s7
-; GFX1264-NEXT:    s_mov_b32 s12, s2
-; GFX1264-NEXT:    s_mov_b32 s13, s3
-; GFX1264-NEXT:    buffer_atomic_sub_u64 v[0:1], off, s[12:15], null th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1264-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1264-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1264-NEXT:    s_mov_b32 s10, -1
+; GFX1264-NEXT:    s_mov_b32 s8, s2
+; GFX1264-NEXT:    s_mov_b32 s9, s3
+; GFX1264-NEXT:    buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_DEV
 ; GFX1264-NEXT:    s_wait_loadcnt 0x0
 ; GFX1264-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1264-NEXT:  .LBB10_2:
-; GFX1264-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GFX1264-NEXT:    s_or_b64 exec, exec, s[6:7]
 ; GFX1264-NEXT:    s_wait_kmcnt 0x0
 ; GFX1264-NEXT:    v_mad_co_u64_u32 v[3:4], null, s4, v2, 0
 ; GFX1264-NEXT:    v_readfirstlane_b32 s2, v0
diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index 16fe85bf138b2..9bbecacd6c774 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -1147,12 +1147,11 @@ define amdgpu_kernel void @test_call_external_void_func_v2i64() #0 {
 ; VI-NEXT:    s_mov_b32 s37, SCRATCH_RSRC_DWORD1
 ; VI-NEXT:    s_mov_b32 s38, -1
 ; VI-NEXT:    s_mov_b32 s39, 0xe80000
-; VI-NEXT:    s_mov_b64 s[6:7], s[0:1]
-; VI-NEXT:    s_mov_b32 s0, 0
 ; VI-NEXT:    s_add_u32 s36, s36, s3
+; VI-NEXT:    s_mov_b64 s[6:7], s[0:1]
+; VI-NEXT:    s_mov_b64 s[0:1], 0
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s1, s0
 ; VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_addc_u32 s37, s37, 0
 ; VI-NEXT:    s_mov_b64 s[0:1], s[36:37]
@@ -1170,12 +1169,11 @@ define amdgpu_kernel void @test_call_external_void_func_v2i64() #0 {
 ; CI-NEXT:    s_mov_b32 s37, SCRATCH_RSRC_DWORD1
 ; CI-NEXT:    s_mov_b32 s38, -1
 ; CI-NEXT:    s_mov_b32 s39, 0xe8f000
-; CI-NEXT:    s_mov_b64 s[6:7], s[0:1]
-; CI-NEXT:    s_mov_b32 s0, 0
 ; CI-NEXT:    s_add_u32 s36, s36, s3
+; CI-NEXT:    s_mov_b64 s[6:7], s[0:1]
+; CI-NEXT:    s_mov_b64 s[0:1], 0
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-...
[truncated]

Copy link
Collaborator

@qcolombet qcolombet left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LGTM

@arsenm arsenm force-pushed the users/arsenm/peephole-opt/handle-subregister-compose-through-reg-sequence branch from a009eec to e811d17 Compare February 17, 2025 15:13
Base automatically changed from users/arsenm/peephole-opt/handle-subregister-compose-through-reg-sequence to main February 18, 2025 01:07
This reverts d246cc6. We now handle
composing subregister extracts through reg_sequence.
@arsenm arsenm force-pushed the users/arsenm/peephole-opt/allow-introducing-subreg-uses-reg-sequence branch from c332c9c to 9ee6711 Compare February 21, 2025 15:08
Copy link
Contributor Author

arsenm commented Feb 22, 2025

Merge activity

  • Feb 21, 9:15 PM EST: A user started a stack merge that includes this pull request via Graphite.
  • Feb 21, 9:16 PM EST: A user merged this pull request with Graphite.

@arsenm arsenm merged commit 1bb4306 into main Feb 22, 2025
11 checks passed
@arsenm arsenm deleted the users/arsenm/peephole-opt/allow-introducing-subreg-uses-reg-sequence branch February 22, 2025 02:16
@llvm-ci
Copy link
Collaborator

llvm-ci commented Feb 22, 2025

LLVM Buildbot has detected a new failure on builder llvm-clang-x86_64-expensive-checks-ubuntu running on as-builder-4 while building llvm at step 6 "test-build-unified-tree-check-all".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/187/builds/4471

Here is the relevant piece of the build log for the reference
Step 6 (test-build-unified-tree-check-all) failure: test (failure)
******************** TEST 'LLVM :: CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll' FAILED ********************
Exit Code: 2

Command Output (stderr):
--
RUN: at line 2: /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/llc < /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll --mattr=+mve.fp,+fp64 -o - | /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/FileCheck /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll
+ /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/FileCheck /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll
+ /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/llc --mattr=+mve.fp,+fp64 -o -

# After Peephole Optimizations
# Machine code for function mul_divequal: IsSSA, TracksLiveness
Frame Objects:
  fi#-2: size=16, align=8, fixed, at location [SP+16]
  fi#-1: size=16, align=8, fixed, at location [SP]
Function Live Ins: $r0 in %0, $r1 in %1, $r2 in %2, $r3 in %3

bb.0.entry:
  liveins: $r0, $r1, $r2, $r3
  %3:gpr = COPY $r3
  %2:gpr = COPY $r2
  %1:gpr = COPY $r1
  %0:gpr = COPY $r0
  %4:rgpr = t2ADDri %fixed-stack.0, 0, 14, $noreg, $noreg
  %5:dpr = VMOVDRR %2:gpr, %3:gpr, 14, $noreg
  %6:dpr = VMOVDRR %0:gpr, %1:gpr, 14, $noreg
  %7:rgpr = t2ADDri %fixed-stack.1, 0, 14, $noreg, $noreg
  %8:mqpr = MVE_VLDRWU32 killed %7:rgpr, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.1, align 8)
  %9:spr = COPY %8.ssub_2:mqpr
  %10:spr = COPY %8.ssub_0:mqpr
  %14:spr = IMPLICIT_DEF
  %16:spr = IMPLICIT_DEF
  %13:mqpr = REG_SEQUENCE %8.ssub_0:mqpr, %subreg.ssub_0, %9:spr, %subreg.ssub_1, %14:spr, %subreg.ssub_2, %16:spr, %subreg.ssub_3
  %18:spr = COPY %8.ssub_3:mqpr
  %19:spr = COPY %8.ssub_1:mqpr
  %23:spr = IMPLICIT_DEF
  %25:spr = IMPLICIT_DEF
  %22:mqpr = REG_SEQUENCE %19:spr, %subreg.ssub_0, %18:spr, %subreg.ssub_1, %23:spr, %subreg.ssub_2, %25:spr, %subreg.ssub_3
  %27:mqpr = MVE_VLDRWU32 killed %4:rgpr, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
  %28:spr = COPY %27.ssub_3:mqpr
  %30:mqpr = REG_SEQUENCE killed %6:dpr, %subreg.dsub_0, %5:dpr, %subreg.dsub_1
  %31:spr = COPY %30.ssub_3:mqpr
  %32:spr = COPY %30.ssub_1:mqpr
  %36:spr = IMPLICIT_DEF
  %38:spr = IMPLICIT_DEF
  %35:mqpr = REG_SEQUENCE %32:spr, %subreg.ssub_0, %5.ssub_1:dpr, %subreg.ssub_1, %36:spr, %subreg.ssub_2, %38:spr, %subreg.ssub_3
  %40:spr = COPY %30.ssub_2:mqpr
  %41:spr = COPY %30.ssub_0:mqpr
  %45:spr = IMPLICIT_DEF
  %47:spr = IMPLICIT_DEF
  %44:mqpr = REG_SEQUENCE %30.ssub_0:mqpr, %subreg.ssub_0, %40:spr, %subreg.ssub_1, %45:spr, %subreg.ssub_2, %47:spr, %subreg.ssub_3
...

@arsenm
Copy link
Contributor Author

arsenm commented Feb 22, 2025

LLVM Buildbot has detected a new failure on builder llvm-clang-x86_64-expensive-checks-ubuntu running on as-builder-4 while building llvm at step 6 "test-build-unified-tree-check-all".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/187/builds/4471

Here is the relevant piece of the build log for the reference

Expensive check failure fixed by #128279

@llvm-ci
Copy link
Collaborator

llvm-ci commented Feb 22, 2025

LLVM Buildbot has detected a new failure on builder llvm-clang-x86_64-expensive-checks-debian running on gribozavr4 while building llvm at step 6 "test-build-unified-tree-check-all".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/16/builds/14186

Here is the relevant piece of the build log for the reference
Step 6 (test-build-unified-tree-check-all) failure: test (failure)
******************** TEST 'LLVM :: CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll' FAILED ********************
Exit Code: 2

Command Output (stderr):
--
RUN: at line 2: /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/llc < /b/1/llvm-clang-x86_64-expensive-checks-debian/llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll --mattr=+mve.fp,+fp64 -o - | /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/FileCheck /b/1/llvm-clang-x86_64-expensive-checks-debian/llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll
+ /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/llc --mattr=+mve.fp,+fp64 -o -
+ /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/FileCheck /b/1/llvm-clang-x86_64-expensive-checks-debian/llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-mixed-cases.ll

# After Peephole Optimizations
# Machine code for function mul_divequal: IsSSA, TracksLiveness
Frame Objects:
  fi#-2: size=16, align=8, fixed, at location [SP+16]
  fi#-1: size=16, align=8, fixed, at location [SP]
Function Live Ins: $r0 in %0, $r1 in %1, $r2 in %2, $r3 in %3

bb.0.entry:
  liveins: $r0, $r1, $r2, $r3
  %3:gpr = COPY $r3
  %2:gpr = COPY $r2
  %1:gpr = COPY $r1
  %0:gpr = COPY $r0
  %4:rgpr = t2ADDri %fixed-stack.0, 0, 14, $noreg, $noreg
  %5:dpr = VMOVDRR %2:gpr, %3:gpr, 14, $noreg
  %6:dpr = VMOVDRR %0:gpr, %1:gpr, 14, $noreg
  %7:rgpr = t2ADDri %fixed-stack.1, 0, 14, $noreg, $noreg
  %8:mqpr = MVE_VLDRWU32 killed %7:rgpr, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.1, align 8)
  %9:spr = COPY %8.ssub_2:mqpr
  %10:spr = COPY %8.ssub_0:mqpr
  %14:spr = IMPLICIT_DEF
  %16:spr = IMPLICIT_DEF
  %13:mqpr = REG_SEQUENCE %8.ssub_0:mqpr, %subreg.ssub_0, %9:spr, %subreg.ssub_1, %14:spr, %subreg.ssub_2, %16:spr, %subreg.ssub_3
  %18:spr = COPY %8.ssub_3:mqpr
  %19:spr = COPY %8.ssub_1:mqpr
  %23:spr = IMPLICIT_DEF
  %25:spr = IMPLICIT_DEF
  %22:mqpr = REG_SEQUENCE %19:spr, %subreg.ssub_0, %18:spr, %subreg.ssub_1, %23:spr, %subreg.ssub_2, %25:spr, %subreg.ssub_3
  %27:mqpr = MVE_VLDRWU32 killed %4:rgpr, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
  %28:spr = COPY %27.ssub_3:mqpr
  %30:mqpr = REG_SEQUENCE killed %6:dpr, %subreg.dsub_0, %5:dpr, %subreg.dsub_1
  %31:spr = COPY %30.ssub_3:mqpr
  %32:spr = COPY %30.ssub_1:mqpr
  %36:spr = IMPLICIT_DEF
  %38:spr = IMPLICIT_DEF
  %35:mqpr = REG_SEQUENCE %32:spr, %subreg.ssub_0, %5.ssub_1:dpr, %subreg.ssub_1, %36:spr, %subreg.ssub_2, %38:spr, %subreg.ssub_3
  %40:spr = COPY %30.ssub_2:mqpr
  %41:spr = COPY %30.ssub_0:mqpr
  %45:spr = IMPLICIT_DEF
  %47:spr = IMPLICIT_DEF
  %44:mqpr = REG_SEQUENCE %30.ssub_0:mqpr, %subreg.ssub_0, %40:spr, %subreg.ssub_1, %45:spr, %subreg.ssub_2, %47:spr, %subreg.ssub_3
...

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Projects

None yet

Development

Successfully merging this pull request may close these issues.

5 participants