6 changes: 0 additions & 6 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16806,12 +16806,6 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
return FrozenN0;
}

// We currently avoid folding freeze over SRA/SRL, due to the problems seen
// with (freeze (assert ext)) blocking simplifications of SRA/SRL. See for
// example https://reviews.llvm.org/D136529#4120959.
if (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)
return SDValue();

// Fold freeze(op(x, ...)) -> op(freeze(x), ...).
// Try to push freeze through instructions that propagate but don't produce
// poison as far as possible. If an operand of freeze follows three
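The deleted block was a special case preventing visitFREEZE from pushing freeze through shift nodes. As a hedged illustration of the fold that now applies uniformly — written in LLVM IR for readability, though the combine actually operates on SelectionDAG ISD::SRA/SRL nodes, and the function names are hypothetical:

```llvm
; Before the combine: the freeze sits above the shift and blocks
; simplifications that need to look through it.
define i64 @push_freeze_through_srl(i64 %x) {
  %shr = lshr i64 %x, 8
  %fr  = freeze i64 %shr
  ret i64 %fr
}

; After visitFREEZE applies freeze(srl x, c) -> srl(freeze x, c),
; later combines can reason about the shift directly.
define i64 @push_freeze_through_srl_pushed(i64 %x) {
  %fx  = freeze i64 %x
  %shr = lshr i64 %fx, 8
  ret i64 %shr
}
```

Shifts propagate poison but do not generate it (the shift amount here is a constant), so hoisting the freeze to the operand is sound; the question the PR addresses is whether it still causes the regressions D136529 originally saw.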
14 changes: 6 additions & 8 deletions llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
@@ -14,15 +14,13 @@ define amdgpu_kernel void @uniform_trunc_i16_to_i1(ptr addrspace(1) %out, i16 %x
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY2]], %subreg.sub0, killed [[COPY1]], %subreg.sub1, killed [[S_MOV_B32_1]], %subreg.sub2, killed [[S_MOV_B32_]], %subreg.sub3
; GCN-NEXT: [[S_SEXT_I32_I16_:%[0-9]+]]:sreg_32 = S_SEXT_I32_I16 [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 16
; GCN-NEXT: [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], killed [[S_MOV_B32_2]], implicit-def dead $scc
; GCN-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
; GCN-NEXT: S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
; GCN-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY killed [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[S_SEXT_I32_I16_:%[0-9]+]]:sreg_32 = S_SEXT_I32_I16 [[COPY3]]
; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 65536, [[COPY3]], implicit-def dead $scc
; GCN-NEXT: S_CMP_LG_U32 killed [[S_AND_B32_]], 0, implicit-def $scc
; GCN-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $scc
; GCN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: S_CMP_LT_I32 killed [[S_SEXT_I32_I16_]], killed [[S_MOV_B32_3]], implicit-def $scc
; GCN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: S_CMP_LT_I32 killed [[S_SEXT_I32_I16_]], killed [[S_MOV_B32_2]], implicit-def $scc
; GCN-NEXT: [[COPY5:%[0-9]+]]:sreg_64 = COPY $scc
; GCN-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[COPY5]], killed [[COPY4]], implicit-def dead $scc
; GCN-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
118 changes: 59 additions & 59 deletions llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -653,11 +653,11 @@ define i64 @v_test_srem24_64(i64 %x, i64 %y) {
define amdgpu_kernel void @s_test_srem25_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-LABEL: s_test_srem25_64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s0, s[4:5], 0xe
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_ashr_i32 s0, s0, 7
; GCN-NEXT: s_ashr_i32 s0, s1, 7
; GCN-NEXT: s_abs_i32 s8, s0
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -694,11 +694,11 @@ define amdgpu_kernel void @s_test_srem25_64(ptr addrspace(1) %out, i64 %x, i64 %
;
; GCN-IR-LABEL: s_test_srem25_64:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dword s0, s[4:5], 0xe
; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_ashr_i32 s0, s0, 7
; GCN-IR-NEXT: s_ashr_i32 s0, s1, 7
; GCN-IR-NEXT: s_abs_i32 s8, s0
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -742,11 +742,11 @@ define amdgpu_kernel void @s_test_srem25_64(ptr addrspace(1) %out, i64 %x, i64 %
define amdgpu_kernel void @s_test_srem31_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-LABEL: s_test_srem31_64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s0, s[4:5], 0xe
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_ashr_i32 s0, s0, 1
; GCN-NEXT: s_ashr_i32 s0, s1, 1
; GCN-NEXT: s_abs_i32 s8, s0
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -783,11 +783,11 @@ define amdgpu_kernel void @s_test_srem31_64(ptr addrspace(1) %out, i64 %x, i64 %
;
; GCN-IR-LABEL: s_test_srem31_64:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dword s0, s[4:5], 0xe
; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_ashr_i32 s0, s0, 1
; GCN-IR-NEXT: s_ashr_i32 s0, s1, 1
; GCN-IR-NEXT: s_abs_i32 s8, s0
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -832,11 +832,11 @@ define amdgpu_kernel void @s_test_srem31_64(ptr addrspace(1) %out, i64 %x, i64 %
define amdgpu_kernel void @s_test_srem32_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-LABEL: s_test_srem32_64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s0, s[4:5], 0xe
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_abs_i32 s8, s0
; GCN-NEXT: s_abs_i32 s8, s1
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-NEXT: s_sub_i32 s0, 0, s8
; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
@@ -871,11 +871,11 @@ define amdgpu_kernel void @s_test_srem32_64(ptr addrspace(1) %out, i64 %x, i64 %
;
; GCN-IR-LABEL: s_test_srem32_64:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dword s0, s[4:5], 0xe
; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_abs_i32 s8, s0
; GCN-IR-NEXT: s_abs_i32 s8, s1
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-IR-NEXT: s_sub_i32 s0, 0, s8
; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v0, v0
@@ -921,36 +921,34 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_ashr_i64 s[2:3], s[2:3], 31
; GCN-NEXT: s_ashr_i64 s[4:5], s[4:5], 31
; GCN-NEXT: s_ashr_i32 s6, s5, 31
; GCN-NEXT: s_add_u32 s4, s4, s6
; GCN-NEXT: s_mov_b32 s7, s6
; GCN-NEXT: s_addc_u32 s5, s5, s6
; GCN-NEXT: s_xor_b64 s[8:9], s[4:5], s[6:7]
; GCN-NEXT: s_ashr_i64 s[10:11], s[2:3], 31
; GCN-NEXT: s_ashr_i64 s[6:7], s[4:5], 31
; GCN-NEXT: s_ashr_i32 s4, s5, 31
; GCN-NEXT: s_ashr_i32 s5, s7, 31
; GCN-NEXT: s_add_u32 s6, s6, s4
; GCN-NEXT: s_addc_u32 s7, s7, s5
; GCN-NEXT: s_xor_b64 s[8:9], s[6:7], s[4:5]
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9
; GCN-NEXT: s_sub_u32 s4, 0, s8
; GCN-NEXT: s_subb_u32 s5, 0, s9
; GCN-NEXT: s_ashr_i32 s10, s3, 31
; GCN-NEXT: s_sub_u32 s2, 0, s8
; GCN-NEXT: s_subb_u32 s4, 0, s9
; GCN-NEXT: s_ashr_i32 s12, s3, 31
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: s_add_u32 s2, s2, s10
; GCN-NEXT: s_mov_b32 s11, s10
; GCN-NEXT: s_addc_u32 s3, s3, s10
; GCN-NEXT: s_mov_b32 s13, s12
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; GCN-NEXT: v_trunc_f32_e32 v1, v1
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[10:11]
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: v_mul_lo_u32 v2, s4, v1
; GCN-NEXT: v_mul_hi_u32 v3, s4, v0
; GCN-NEXT: v_mul_lo_u32 v5, s5, v0
; GCN-NEXT: v_mul_lo_u32 v4, s4, v0
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: v_mul_lo_u32 v2, s2, v1
; GCN-NEXT: v_mul_hi_u32 v3, s2, v0
; GCN-NEXT: v_mul_lo_u32 v5, s4, v0
; GCN-NEXT: v_mul_lo_u32 v4, s2, v0
; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5
; GCN-NEXT: v_mul_hi_u32 v3, v0, v4
@@ -969,12 +967,12 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GCN-NEXT: v_mul_lo_u32 v2, s4, v1
; GCN-NEXT: v_mul_hi_u32 v3, s4, v0
; GCN-NEXT: v_mul_lo_u32 v4, s5, v0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: v_mul_lo_u32 v2, s2, v1
; GCN-NEXT: v_mul_hi_u32 v3, s2, v0
; GCN-NEXT: v_mul_lo_u32 v4, s4, v0
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; GCN-NEXT: v_mul_lo_u32 v3, s4, v0
; GCN-NEXT: v_mul_lo_u32 v3, s2, v0
; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4
; GCN-NEXT: v_mul_lo_u32 v6, v0, v2
; GCN-NEXT: v_mul_hi_u32 v7, v0, v3
@@ -990,18 +988,20 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GCN-NEXT: s_add_u32 s2, s10, s12
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GCN-NEXT: s_addc_u32 s3, s11, s12
; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GCN-NEXT: v_mul_lo_u32 v2, s12, v1
; GCN-NEXT: v_mul_hi_u32 v3, s12, v0
; GCN-NEXT: v_mul_hi_u32 v4, s12, v1
; GCN-NEXT: v_mul_hi_u32 v5, s13, v1
; GCN-NEXT: v_mul_lo_u32 v1, s13, v1
; GCN-NEXT: s_xor_b64 s[10:11], s[2:3], s[12:13]
; GCN-NEXT: v_mul_lo_u32 v2, s10, v1
; GCN-NEXT: v_mul_hi_u32 v3, s10, v0
; GCN-NEXT: v_mul_hi_u32 v4, s10, v1
; GCN-NEXT: v_mul_hi_u32 v5, s11, v1
; GCN-NEXT: v_mul_lo_u32 v1, s11, v1
; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GCN-NEXT: v_mul_lo_u32 v4, s13, v0
; GCN-NEXT: v_mul_hi_u32 v0, s13, v0
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: v_mul_lo_u32 v4, s11, v0
; GCN-NEXT: v_mul_hi_u32 v0, s11, v0
; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4
; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
@@ -1013,9 +1013,9 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: v_mul_lo_u32 v0, s8, v0
; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2
; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
; GCN-NEXT: v_sub_i32_e32 v2, vcc, s13, v1
; GCN-NEXT: v_sub_i32_e32 v2, vcc, s11, v1
; GCN-NEXT: v_mov_b32_e32 v3, s9
; GCN-NEXT: v_sub_i32_e32 v0, vcc, s12, v0
; GCN-NEXT: v_sub_i32_e32 v0, vcc, s10, v0
; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s8, v0
; GCN-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
@@ -1030,7 +1030,7 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6
; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v4, s13
; GCN-NEXT: v_mov_b32_e32 v4, s11
; GCN-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
; GCN-NEXT: v_cmp_le_u32_e32 vcc, s9, v1
; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
@@ -1042,36 +1042,36 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
; GCN-NEXT: v_xor_b32_e32 v0, s10, v0
; GCN-NEXT: v_xor_b32_e32 v1, s10, v1
; GCN-NEXT: v_mov_b32_e32 v2, s10
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s10, v0
; GCN-NEXT: v_xor_b32_e32 v0, s12, v0
; GCN-NEXT: v_xor_b32_e32 v1, s12, v1
; GCN-NEXT: v_mov_b32_e32 v2, s12
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s12, v0
; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_srem33_64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GCN-IR-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; GCN-IR-NEXT: s_mov_b32 s13, 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[2:3], 31
; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[4:5], 31
; GCN-IR-NEXT: s_ashr_i32 s4, s3, 31
; GCN-IR-NEXT: s_ashr_i64 s[6:7], s[2:3], 31
; GCN-IR-NEXT: s_mov_b32 s5, s4
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GCN-IR-NEXT: s_ashr_i64 s[10:11], s[8:9], 31
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[6:7], s[4:5]
; GCN-IR-NEXT: s_sub_u32 s6, s2, s4
; GCN-IR-NEXT: s_subb_u32 s7, s3, s4
; GCN-IR-NEXT: s_ashr_i32 s2, s9, 31
; GCN-IR-NEXT: s_mov_b32 s3, s2
; GCN-IR-NEXT: s_xor_b64 s[8:9], s[8:9], s[2:3]
; GCN-IR-NEXT: s_xor_b64 s[8:9], s[10:11], s[2:3]
; GCN-IR-NEXT: s_sub_u32 s8, s8, s2
; GCN-IR-NEXT: s_subb_u32 s9, s9, s2
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[2:3], s[8:9], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[2:3], s[6:7], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[8:9]
; GCN-IR-NEXT: s_or_b64 s[10:11], s[2:3], s[10:11]
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[2:3]
; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[6:7]
; GCN-IR-NEXT: s_sub_u32 s14, s12, s20
; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
15 changes: 9 additions & 6 deletions llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -232,7 +232,8 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a1, 16
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: srliw a1, a1, 24
; RV64I-NEXT: slli a1, a1, 34
; RV64I-NEXT: srli a1, a1, 58
; RV64I-NEXT: xori a1, a1, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: or a0, a0, a1
@@ -270,17 +271,19 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 2
; RV64I-NEXT: srli a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 4
; RV64I-NEXT: srli a2, a0, 4
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 8
; RV64I-NEXT: slli a2, a0, 33
; RV64I-NEXT: srli a2, a2, 41
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 16
; RV64I-NEXT: slli a2, a0, 33
; RV64I-NEXT: srli a2, a2, 49
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: srli a2, a0, 1
15 changes: 9 additions & 6 deletions llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -204,7 +204,8 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a1, 16
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: srliw a1, a1, 24
; RV64I-NEXT: slli a1, a1, 34
Collaborator comment:
I spent some time with this yesterday. The issue is that we don't push the freeze up the pairs of slli+add. I was able to trick it a bit by emitting a freeze before the slli+add pairs are emitted in expandCTPOP.
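A hedged sketch of the shape this comment describes, in LLVM IR rather than the DAG nodes expandCTPOP actually builds: the slli+add pairs implement the final multiply by 0x01010101 in the ctpop expansion, and the placement of the freeze is the trick being described; the function name and exact structure are assumptions.

```llvm
; Hypothetical tail of an expanded i32 ctpop on RV64, where
; (v * 0x01010101) >> 24 is built from slli+add pairs:
; v + (v << 8) + (v << 16) + (v << 24).
define i64 @ctpop_tail(i64 %v) {
  ; %v holds per-byte popcount sums in each byte.
  %f   = freeze i64 %v       ; freeze emitted *before* the pairs,
                             ; so known bits survive through them
  %s8  = shl i64 %f, 8       ; slli a2, a1, 8
  %a8  = add i64 %f, %s8     ; add  a1, a1, a2
  %s16 = shl i64 %a8, 16     ; slli a2, a1, 16
  %a16 = add i64 %a8, %s16   ; add  a1, a1, a2
  %res = lshr i64 %a16, 24   ; ideally selected as srliw a1, a1, 24
  ret i64 %res
}
```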

; RV64I-NEXT: srli a1, a1, 58
; RV64I-NEXT: xori a1, a1, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: or a0, a0, a1
@@ -231,17 +232,19 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 2
; RV64I-NEXT: srli a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 4
; RV64I-NEXT: srli a2, a0, 4
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 8
; RV64I-NEXT: slli a2, a0, 33
Collaborator comment:
I think the issue here is that we moved a freeze in the entry block, which allowed computeKnownBits to compute a value for the output of the block. Then an AssertZExt was emitted in the cond.false block. This allowed us to remove some bits from the AND mask, and our isel code for srliw doesn't use computeKnownBits to fill in missing bits.
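A hedged IR sketch of the pattern-match issue described above; the masks and function names are hypothetical, and the real DAG carries an AssertZExt rather than an explicit and.

```llvm
; With the full low-32-bit mask, isel can select srliw.
define i64 @full_mask(i64 %x) {
  %m = and i64 %x, 4294967295   ; 0xffffffff
  %s = lshr i64 %m, 8           ; -> srliw a0, a0, 8
  ret i64 %s
}

; If known-bits proved the top bit zero and the mask was narrowed,
; the srliw pattern no longer matches, and a slli+srli pair is
; selected instead -- the shape seen in this test's new output.
define i64 @narrowed_mask(i64 %x) {
  %m = and i64 %x, 2147483647   ; 0x7fffffff, hypothetical narrowed mask
  %s = lshr i64 %m, 8           ; -> slli a0, a0, 33; srli a0, a0, 41
  ret i64 %s
}
```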

; RV64I-NEXT: srli a2, a2, 41
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 16
; RV64I-NEXT: slli a2, a0, 33
; RV64I-NEXT: srli a2, a2, 49
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: srli a2, a0, 1