From e8f1125612cc1f0bd225c580656af58b879b1a7d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?=
Date: Fri, 11 Jul 2025 11:47:56 +0200
Subject: [PATCH 1/2] Pre-Commit test: SWDEV-542372: crash in si-fold-operands

---
 .../AMDGPU/si-fold-operands-swdev-542372.ll   | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll

diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
new file mode 100644
index 0000000000000..35d70f20bffb2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
@@ -0,0 +1,39 @@
+; RUN: not --crash llc -O3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o /dev/null
+
+@lds = internal addrspace(3) global [5 x i32] poison
+
+define amdgpu_kernel void @kernel() {
+entry:
+  %load.lds.0 = load <2 x i32>, ptr addrspace(3) @lds
+  %vecext.i55 = extractelement <2 x i32> %load.lds.0, i64 0
+  %cmp3.i57 = icmp eq i32 %vecext.i55, 2
+  store i32 0, ptr addrspace(3) @lds
+  br i1 %cmp3.i57, label %land.rhs49, label %land.end59
+
+land.rhs49:                                       ; preds = %entry
+  %load.lds.1 = load <2 x i32>, ptr addrspace(3) @lds
+  %vecext.i67 = extractelement <2 x i32> %load.lds.1, i64 0
+  %cmp3.i69 = icmp eq i32 %vecext.i67, 1
+  br i1 %cmp3.i69, label %land.rhs57, label %land.end59
+
+land.rhs57:                                       ; preds = %land.rhs49
+  %rem.i.i.i = srem <2 x i32> %load.lds.0, %load.lds.1
+  %ref.tmp.sroa.0.0.vec.extract.i.i = extractelement <2 x i32> %rem.i.i.i, i64 0
+  store i32 %ref.tmp.sroa.0.0.vec.extract.i.i, ptr addrspace(3) @lds
+  store i32 %ref.tmp.sroa.0.0.vec.extract.i.i, ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @lds, i32 4)
+  %load.lds.2 = load <2 x i32>, ptr addrspace(3) @lds
+  %vecext.i.i.i = extractelement <2 x i32> %load.lds.2, i64 0
+  %cmp3.i.i.i = icmp ne i32 %vecext.i.i.i, 0
+  %vecext.1.i.i.i = extractelement <2 x i32> %load.lds.2, i64 1
+  %cmp3.1.i.i.i = icmp ne i32 %vecext.1.i.i.i, 0
+  %.not.i.i = select i1 %cmp3.i.i.i, i1 true, i1 %cmp3.1.i.i.i
+  br i1 %.not.i.i, label %land.end59, label %if.end.i
+
+if.end.i:                                         ; preds = %land.rhs57
+  %and.i.i.i = and <2 x i32> %load.lds.2, splat (i32 1)
+  %ref.tmp.sroa.0.0.vec.extract.i20.i = extractelement <2 x i32> %and.i.i.i, i64 0
+  br label %land.end59
+
+land.end59:                                       ; preds = %if.end.i, %land.rhs57, %land.rhs49, %entry
+  ret void
+}

From 2f8a8887a1b83c804c5328d0cb419d7c05abb2db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?=
Date: Fri, 11 Jul 2025 12:30:59 +0200
Subject: [PATCH 2/2] Revert "AMDGPU: Try constant fold after folding
 immediate (#141862)"

This reverts commit 80064b6e326d0cf34bac1d09c12fc1e6abecb7af.

The reverted patch triggers a crash when the same folded use
instruction has two of its operands in the fold list.

See https://github.com/llvm/llvm-project/pull/148187 for more info.
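
As an illustration (hypothetical MIR, not taken from the reproducer;
the register numbers and the choice of V_OR_B32_e64 are only
assumptions for this sketch): both operands of a single use can come
from the same foldable value, so FoldList ends up holding two
FoldCandidate entries for one UseMI:

  ; %0 is a foldable immediate; both uses queue a FoldCandidate
  ; against the same V_OR_B32_e64 instruction.
  %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
  %1:vgpr_32 = V_OR_B32_e64 %0, %0, implicit $exec

After the first candidate is folded, the reverted tryConstantFoldOp
call could rewrite %1 in place (an OR with 0 folds to a COPY, as the
constant-fold-imm-immreg.mir test below shows), leaving the second
candidate pointing at a mutated instruction and a stale operand index.
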
SWDEV-542372
---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     |  6 ------
 .../AMDGPU/bit-op-reduce-width-known-bits.ll  |  3 ++-
 .../AMDGPU/constant-fold-imm-immreg.mir       |  2 +-
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir    |  3 ++-
 .../fold-zero-high-bits-skips-non-reg.mir     |  4 ++--
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            |  7 ++++---
 .../AMDGPU/si-fold-operands-swdev-542372.ll   | 26 ++++++++++++++++++-
 llvm/test/CodeGen/AMDGPU/srem64.ll            |  7 ++++---
 llvm/test/CodeGen/AMDGPU/udiv64.ll            |  3 ++-
 llvm/test/CodeGen/AMDGPU/urem64.ll            |  7 ++++---
 10 files changed, 46 insertions(+), 22 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 0ed06c37507af..b8fecc382c64a 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1782,12 +1782,6 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                         << static_cast<int>(Fold.UseOpNo) << " of "
                         << *Fold.UseMI);
-
-      if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
-        LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
-        Changed = true;
-      }
-
     } else if (Fold.Commuted) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);
diff --git a/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll b/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
index ad26dfa7f93e8..ac5f9b6b483eb 100644
--- a/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
@@ -105,8 +105,9 @@ define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
 ; CHECK-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; CHECK-NEXT:  ; %bb.1: ; %inc
 ; CHECK-NEXT:    v_not_b32_e32 v2, v4
+; CHECK-NEXT:    v_not_b32_e32 v3, 0
 ; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
 ; CHECK-NEXT:  ; %bb.2: ; %UnifiedReturnBlock
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index e7177a5e7160e..e10707de4d986 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -973,7 +973,7 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN-NEXT: {{  $}}
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 0, [[COPY]], implicit $exec
     ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY1]]
     %0:vgpr_32 = COPY $vgpr0
     %1:vgpr_32 = COPY $vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index 74c4a2da50221..d00fd9b967f37 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -43,7 +43,8 @@ body: |
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
-    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 0, [[DEF1]], implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_1:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
     %0:vgpr_32 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir b/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
index dc03eb74cbf11..b1aa88969c5bb 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
@@ -8,8 +8,8 @@ body: |
     ; CHECK-LABEL: name: test_tryFoldZeroHighBits_skips_nonreg
     ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1
-    ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]]
+    ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, 0, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit [[V_AND_B32_e64_]]
     %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
     %2:vgpr_32 = V_AND_B32_e64 65535, %1.sub0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index df496258a2509..15eb41a1a5b65 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -404,11 +404,12 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
+; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
+; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
index 35d70f20bffb2..24e27f352f494 100644
--- a/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
@@ -1,8 +1,32 @@
-; RUN: not --crash llc -O3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o - | FileCheck %s
 
 @lds = internal addrspace(3) global [5 x i32] poison
 
 define amdgpu_kernel void @kernel() {
+; CHECK-LABEL: kernel:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    ds_read_b64 v[2:3], v0
+; CHECK-NEXT:    ds_write_b32 v0, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(1)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 2, v2
+; CHECK-NEXT:    s_cbranch_vccnz .LBB0_3
+; CHECK-NEXT:  ; %bb.1: ; %land.rhs49
+; CHECK-NEXT:    ds_read_b64 v[0:1], v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
+; CHECK-NEXT:    s_cbranch_vccnz .LBB0_3
+; CHECK-NEXT:  ; %bb.2: ; %land.rhs57
+; CHECK-NEXT:    s_mov_b32 s0, 0
+; CHECK-NEXT:    s_mov_b32 s1, s0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; CHECK-NEXT:    s_or_b32 s0, 0, 0
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    ds_write_b64 v2, v[0:1]
+; CHECK-NEXT:  .LBB0_3: ; %land.end59
+; CHECK-NEXT:    s_endpgm
 entry:
   %load.lds.0 = load <2 x i32>, ptr addrspace(3) @lds
   %vecext.i55 = extractelement <2 x i32> %load.lds.0, i64 0
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 47dfa9f4fc2d3..c729c3fb8a4e4 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -380,11 +380,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
+; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index e9017939f8a4a..5acbb044c1057 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -348,9 +348,10 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
 ; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
+; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 6480a88d40f5a..94f1b83ea2765 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -355,11 +355,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
+; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0