Commit b705fe5

[TargetLowering][X86] Teach SimplifyDemandedBits to handle cases where only the sign bit is demanded from a SETCC and can be passed through
If we're doing a compare that only tests the sign bit and only the sign bit is demanded, we can just bypass the setcc node. This removes one of the blend dependencies in our v2i64->v2f32 uint_to_fp codegen on pre-SSE4.2 targets.

Differential Revision: https://reviews.llvm.org/D72356
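
The reasoning behind the bypass can be shown in isolation: on a target whose setcc booleans are zero-or-negative-one, the result of "x < 0" carries the same sign bit as x itself, so a user that only demands the sign bit may read x directly. A minimal standalone C++ sketch of that identity (illustrative only, not LLVM code; the names are made up for the example):

#include <cassert>
#include <cstdint>

// Model of a ZeroOrNegativeOneBooleanContent-style setcc: "x < 0" yields
// all-ones (-1) when true and zero when false.
static int64_t setccLessThanZero(int64_t X) { return X < 0 ? -1 : 0; }

// Extract only the sign bit -- the single bit the combine cares about.
static uint64_t signBit(int64_t V) { return static_cast<uint64_t>(V) >> 63; }

int main() {
  const int64_t Samples[] = {0, 1, -1, 42, -42, INT64_MIN, INT64_MAX};
  for (int64_t X : Samples) {
    // When only the sign bit is demanded, the setcc can be bypassed:
    // sign_bit(x < 0 ? -1 : 0) == sign_bit(x).
    assert(signBit(setccLessThanZero(X)) == signBit(X));
  }
  return 0;
}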
1 parent 002be6c

2 files changed, +67 -49 lines
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

Lines changed: 21 additions & 0 deletions
@@ -726,6 +726,27 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
       return Op.getOperand(1);
     break;
   }
+  case ISD::SETCC: {
+    SDValue Op0 = Op.getOperand(0);
+    SDValue Op1 = Op.getOperand(1);
+    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+    // If (1) we only need the sign-bit, (2) the setcc operands are the same
+    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
+    // -1, we may be able to bypass the setcc.
+    if (DemandedBits.isSignMask() &&
+        Op0.getScalarValueSizeInBits() == DemandedBits.getBitWidth() &&
+        getBooleanContents(Op0.getValueType()) ==
+            BooleanContent::ZeroOrNegativeOneBooleanContent) {
+      // If we're testing X < 0, then this compare isn't needed - just use X!
+      // FIXME: We're limiting to integer types here, but this should also work
+      // if we don't care about FP signed-zero. The use of SETLT with FP means
+      // that we don't care about NaNs.
+      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
+          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
+        return Op0;
+    }
+    break;
+  }
   case ISD::SIGN_EXTEND_INREG: {
     // If none of the extended bits are demanded, eliminate the sextinreg.
     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
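
Condition (2) above (setcc operands as wide as the setcc result) is what keeps the pass-through sound: the demanded sign bit sits at bit width-1 of the setcc result, and that position only coincides with the operand's own sign bit when the two widths match. A standalone C++ sketch of the distinction, again illustrative rather than LLVM code:

#include <cassert>
#include <cstdint>

// Same-width case: a 64-bit 0/-1 result of "x < 0" shares its sign bit with
// the 64-bit operand, so the compare can be bypassed.
static uint64_t signBit64(int64_t V) { return static_cast<uint64_t>(V) >> 63; }
// Narrower-result case: bit 31 of the operand is not the operand's sign bit.
static uint32_t signBit32(uint32_t V) { return V >> 31; }

int main() {
  const int64_t X = 0x00000000FFFFFFFFLL; // positive as i64, but bit 31 is set
  const int64_t Setcc64 = X < 0 ? int64_t(-1) : int64_t(0);
  const int32_t Setcc32 = X < 0 ? -1 : 0;

  // Widths match: bypassing the setcc preserves the demanded sign bit.
  assert(signBit64(Setcc64) == signBit64(X));
  // Result narrower than the operand: "just use X" would flip the answer,
  // because the low 32 bits of X look negative even though X is not.
  assert(signBit32(static_cast<uint32_t>(Setcc32)) !=
         signBit32(static_cast<uint32_t>(X)));
  return 0;
}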

llvm/test/CodeGen/X86/vec_int_to_fp.ll

Lines changed: 46 additions & 49 deletions
@@ -1881,25 +1881,25 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
 ;
 ; SSE41-LABEL: uitofp_2i64_to_4f32:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlq $1, %xmm3
-; SSE41-NEXT: por %xmm2, %xmm3
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
-; SSE41-NEXT: xorps %xmm2, %xmm2
-; SSE41-NEXT: cvtsi2ss %rax, %xmm2
-; SSE41-NEXT: movq %xmm1, %rax
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,1]
+; SSE41-NEXT: pand %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrlq $1, %xmm4
+; SSE41-NEXT: por %xmm1, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: pextrq $1, %xmm2, %rax
+; SSE41-NEXT: xorps %xmm0, %xmm0
+; SSE41-NEXT: cvtsi2ss %rax, %xmm0
+; SSE41-NEXT: movq %xmm2, %rax
 ; SSE41-NEXT: xorps %xmm1, %xmm1
 ; SSE41-NEXT: cvtsi2ss %rax, %xmm1
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero
+; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],zero,zero
 ; SSE41-NEXT: movaps %xmm1, %xmm2
 ; SSE41-NEXT: addps %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT: movaps %xmm1, %xmm0
 ; SSE41-NEXT: retq
@@ -2000,25 +2000,25 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
 ;
 ; SSE41-LABEL: uitofp_2i64_to_2f32:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlq $1, %xmm3
-; SSE41-NEXT: por %xmm2, %xmm3
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
-; SSE41-NEXT: xorps %xmm2, %xmm2
-; SSE41-NEXT: cvtsi2ss %rax, %xmm2
-; SSE41-NEXT: movq %xmm1, %rax
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,1]
+; SSE41-NEXT: pand %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrlq $1, %xmm4
+; SSE41-NEXT: por %xmm1, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: pextrq $1, %xmm2, %rax
+; SSE41-NEXT: xorps %xmm0, %xmm0
+; SSE41-NEXT: cvtsi2ss %rax, %xmm0
+; SSE41-NEXT: movq %xmm2, %rax
 ; SSE41-NEXT: xorps %xmm1, %xmm1
 ; SSE41-NEXT: cvtsi2ss %rax, %xmm1
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero
+; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],zero,zero
 ; SSE41-NEXT: movaps %xmm1, %xmm2
 ; SSE41-NEXT: addps %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm1[0],zero
 ; SSE41-NEXT: retq
@@ -2118,27 +2118,24 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
 ;
 ; SSE41-LABEL: uitofp_4i64_to_4f32_undef:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlq $1, %xmm3
-; SSE41-NEXT: por %xmm2, %xmm3
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,1]
+; SSE41-NEXT: pand %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrlq $1, %xmm2
+; SSE41-NEXT: por %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm0
+; SSE41-NEXT: pextrq $1, %xmm0, %rax
+; SSE41-NEXT: cvtsi2ss %rax, %xmm3
+; SSE41-NEXT: movq %xmm0, %rax
 ; SSE41-NEXT: xorps %xmm2, %xmm2
 ; SSE41-NEXT: cvtsi2ss %rax, %xmm2
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: cvtsi2ss %rax, %xmm1
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero
-; SSE41-NEXT: movaps %xmm1, %xmm2
-; SSE41-NEXT: addps %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],zero,zero
+; SSE41-NEXT: movaps %xmm2, %xmm3
+; SSE41-NEXT: addps %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: blendvps %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movaps %xmm2, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: uitofp_4i64_to_4f32_undef:
