[AMDGPU] Recognise bitmask operations as srcmods on select #152119

Merged
38 changes: 38 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -3212,6 +3212,44 @@ bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
Src = Src.getOperand(0);
}

if (Mods != SISrcMods::NONE)
return true;

// Convert various sign-bit masks on integers to src mods. Currently disabled
// for 16-bit types as the codegen replaces the operand without adding a
// srcmod. This intentionally matches cases where a float-style neg or abs is
// performed on integer types; the goal is not to obtain a two's complement
// neg or abs. Limit the conversion to select operands via the
// non-canonicalizing pattern.
// TODO: Add 16-bit support.
if (IsCanonicalizing)
return true;

unsigned Opc = Src->getOpcode();
EVT VT = Src.getValueType();
if ((Opc != ISD::AND && Opc != ISD::OR && Opc != ISD::XOR) ||
(VT != MVT::i32 && VT != MVT::i64))
return true;

ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Src->getOperand(1));
if (!CRHS)
return true;

// Recognise (xor a, 0x80000000) as NEG SrcMod.
// Recognise (and a, 0x7fffffff) as ABS SrcMod.
// Recognise (or a, 0x80000000) as NEG+ABS SrcMod.
if (Opc == ISD::XOR && CRHS->getAPIntValue().isSignMask()) {
Mods |= SISrcMods::NEG;
Src = Src.getOperand(0);
} else if (Opc == ISD::AND && AllowAbs &&
CRHS->getAPIntValue().isMaxSignedValue()) {
Mods |= SISrcMods::ABS;
Src = Src.getOperand(0);
} else if (Opc == ISD::OR && AllowAbs && CRHS->getAPIntValue().isSignMask()) {
Mods |= SISrcMods::ABS | SISrcMods::NEG;
Src = Src.getOperand(0);
}

return true;
}

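The matching above relies on the standard IEEE-754 sign-bit identities. As a reminder, here is a minimal standalone sketch (plain C++, no LLVM dependency, written for this note rather than taken from the patch) of what each recognised mask does to a float bit pattern: XOR with the sign mask behaves as fneg, AND with the maximum signed value as fabs, and OR with the sign mask as fneg(fabs(x)).

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret a 32-bit pattern as an IEEE-754 single-precision float.
static float AsFloat(uint32_t Bits) {
  float F;
  std::memcpy(&F, &Bits, sizeof(F));
  return F;
}

int main() {
  const uint32_t SignMask = 0x80000000u;  // matched via APInt::isSignMask()
  const uint32_t MaxSigned = 0x7fffffffu; // matched via APInt::isMaxSignedValue()

  float V = -1.5f;
  uint32_t X;
  std::memcpy(&X, &V, sizeof(X));

  std::printf("xor -> NEG     : %f\n", AsFloat(X ^ SignMask));  // 1.5  (fneg)
  std::printf("and -> ABS     : %f\n", AsFloat(X & MaxSigned)); // 1.5  (fabs)
  std::printf("or  -> NEG|ABS : %f\n", AsFloat(X | SignMask));  // -1.5 (-fabs)
  return 0;
}

For an input of -1.5f the three lines print 1.500000, 1.500000 and -1.500000, matching the NEG, ABS and NEG|ABS source-modifier semantics respectively.
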
41 changes: 15 additions & 26 deletions llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -349,29 +349,24 @@ define i32 @select_fneg_xor_select_i32(i1 %cond0, i1 %cond1, i32 %arg0, i32 %arg
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: select_fneg_xor_select_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%fneg0 = xor i32 %arg0, -2147483648
%select0 = select i1 %cond0, i32 %arg1, i32 %fneg0
@@ -550,31 +545,25 @@ define i64 @select_fneg_xor_select_i64(i1 %cond0, i1 %cond1, i64 %arg0, i64 %arg
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc
; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
; GCN-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GCN-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: select_fneg_xor_select_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_and_b32 v1, 1, v1
; GFX11-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%fneg0 = xor i64 %arg0, 9223372036854775808
%select0 = select i1 %cond0, i64 %arg1, i64 %fneg0
93 changes: 93 additions & 0 deletions llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll
@@ -0,0 +1,93 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s

; Demonstrate that the conversion of bitmasks affecting the sign bit on integers to srcmods
; does not apply to canonicalizing instructions.
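
Before the individual tests, a minimal value-level sketch (plain C++, independent of LLVM; the input value is invented for illustration) of what these masks compute when they feed an unsigned-to-double conversion. The masks are ordinary integer operations on the conversion input, and the checks below confirm they remain as explicit mask instructions (v_and_b32, s_and_b32, s_bitset0_b32) rather than becoming source modifiers on the converting (canonicalizing) instruction.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Arg = 0xfffffff5u; // arbitrary example input (4294967285)

  // Mirrors %arg0.abs and %arg0.neg in the IR below: an integer AND applied
  // before the uitofp, not a floating-point abs/neg of the converted result.
  double Abs = (double)(Arg & 0x7fffffffu); // keeps the low 31 bits -> 2147483637.0
  double Neg = (double)(Arg & 0x80000000u); // keeps only bit 31     -> 2147483648.0

  std::printf("abs-masked: %f\n", Abs);
  std::printf("neg-masked: %f\n", Neg);
  return 0;
}
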

define double @v_uitofp_i32_to_f64_abs(i32 %arg0) nounwind {
; GCN-LABEL: v_uitofp_i32_to_f64_abs:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_uitofp_i32_to_f64_abs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%arg0.abs = and i32 %arg0, u0x7fffffff
%cvt = uitofp i32 %arg0.abs to double
ret double %cvt
}

define double @v_uitofp_i32_to_f64_neg(i32 %arg0) nounwind {
; GCN-LABEL: v_uitofp_i32_to_f64_neg:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 0x80000000, v0
; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_uitofp_i32_to_f64_neg:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0x80000000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%arg0.neg = and i32 %arg0, u0x80000000
%cvt = uitofp i32 %arg0.neg to double
ret double %cvt
}

define double @s_uitofp_i32_to_f64_abs(i32 inreg %arg0) nounwind {
; GCN-LABEL: s_uitofp_i32_to_f64_abs:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_bitset0_b32 s16, 31
; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s16
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: s_uitofp_i32_to_f64_abs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_bitset0_b32 s0, 31
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%arg0.abs = and i32 %arg0, u0x7fffffff
%cvt = uitofp i32 %arg0.abs to double
ret double %cvt
}

define double @s_uitofp_i32_to_f64_neg(i32 inreg %arg0) nounwind {
; GCN-LABEL: s_uitofp_i32_to_f64_neg:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_and_b32 s4, s16, 0x80000000
; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s4
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: s_uitofp_i32_to_f64_neg:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s0, 0x80000000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%arg0.neg = and i32 %arg0, u0x80000000
%cvt = uitofp i32 %arg0.neg to double
ret double %cvt
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX11-FAKE16: {{.*}}
; GFX11-TRUE16: {{.*}}
; GFX7: {{.*}}
; GFX9: {{.*}}