Skip to content

Commit 6caf9f8

Browse files
authored
[X86] combineStore - fold scalar float store(fabs/fneg(load())) -> store(and/xor(load(),c)) (#118680)
As noted on #117557 — it's not worth performing scalar float fabs/fneg on the FPU if we're not doing any other FP ops. This is currently limited to store + load pairs. I could try to extend this further if necessary, but we need to be careful that we don't end up in an infinite loop with the DAGCombiner foldBitcastedFPLogic combine. Fixes #117557
1 parent 2e51e15 commit 6caf9f8

File tree

4 files changed

+49
-41
lines changed

4 files changed

+49
-41
lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52661,6 +52661,29 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
5266152661
St->getMemOperand()->getFlags());
5266252662
}
5266352663

52664+
// Convert scalar fabs/fneg load-store to integer equivalents.
52665+
if ((VT == MVT::f32 || VT == MVT::f64) &&
52666+
(StoredVal.getOpcode() == ISD::FABS ||
52667+
StoredVal.getOpcode() == ISD::FNEG) &&
52668+
ISD::isNormalLoad(StoredVal.getOperand(0).getNode()) &&
52669+
StoredVal.hasOneUse() && StoredVal.getOperand(0).hasOneUse()) {
52670+
MVT IntVT = VT.getSimpleVT().changeTypeToInteger();
52671+
if (TLI.isTypeLegal(IntVT)) {
52672+
APInt SignMask = APInt::getSignMask(VT.getScalarSizeInBits());
52673+
unsigned SignOp = ISD::XOR;
52674+
if (StoredVal.getOpcode() == ISD::FABS) {
52675+
SignMask = ~SignMask;
52676+
SignOp = ISD::AND;
52677+
}
52678+
SDValue LogicOp = DAG.getNode(
52679+
SignOp, dl, IntVT, DAG.getBitcast(IntVT, StoredVal.getOperand(0)),
52680+
DAG.getConstant(SignMask, dl, IntVT));
52681+
return DAG.getStore(St->getChain(), dl, LogicOp, St->getBasePtr(),
52682+
St->getPointerInfo(), St->getOriginalAlign(),
52683+
St->getMemOperand()->getFlags());
52684+
}
52685+
}
52686+
5266452687
// If we are saving a 32-byte vector and 32-byte stores are slow, such as on
5266552688
// Sandy Bridge, perform two 16-byte stores.
5266652689
unsigned Fast;

llvm/test/CodeGen/X86/combine-fabs.ll

Lines changed: 9 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -135,20 +135,16 @@ define <4 x float> @combine_vec_fabs_fcopysign(<4 x float> %a, <4 x float> %b) {
135135
ret <4 x float> %2
136136
}
137137

138-
; TODO: store(fabs(load())) - convert scalar to integer
138+
; store(fabs(load())) - convert scalar to integer
139139
define void @combine_fabs_int_rmw_f64(ptr %ptr) {
140140
; SSE-LABEL: combine_fabs_int_rmw_f64:
141141
; SSE: # %bb.0:
142-
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
143-
; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
144-
; SSE-NEXT: movlps %xmm0, (%rdi)
142+
; SSE-NEXT: andb $127, 7(%rdi)
145143
; SSE-NEXT: retq
146144
;
147145
; AVX-LABEL: combine_fabs_int_rmw_f64:
148146
; AVX: # %bb.0:
149-
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
150-
; AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
151-
; AVX-NEXT: vmovlps %xmm0, (%rdi)
147+
; AVX-NEXT: andb $127, 7(%rdi)
152148
; AVX-NEXT: retq
153149
%1 = load double, ptr %ptr
154150
%2 = call double @llvm.fabs.f64(double %1)
@@ -159,17 +155,16 @@ define void @combine_fabs_int_rmw_f64(ptr %ptr) {
159155
define void @combine_fabs_int_f32(ptr %src, ptr %dst) {
160156
; SSE-LABEL: combine_fabs_int_f32:
161157
; SSE: # %bb.0:
162-
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
163-
; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
164-
; SSE-NEXT: movss %xmm0, (%rsi)
158+
; SSE-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
159+
; SSE-NEXT: andl (%rdi), %eax
160+
; SSE-NEXT: movl %eax, (%rsi)
165161
; SSE-NEXT: retq
166162
;
167163
; AVX-LABEL: combine_fabs_int_f32:
168164
; AVX: # %bb.0:
169-
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
170-
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
171-
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
172-
; AVX-NEXT: vmovss %xmm0, (%rsi)
165+
; AVX-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
166+
; AVX-NEXT: andl (%rdi), %eax
167+
; AVX-NEXT: movl %eax, (%rsi)
173168
; AVX-NEXT: retq
174169
%1 = load float, ptr %src
175170
%2 = call float @llvm.fabs.f32(float %1)

llvm/test/CodeGen/X86/combine-fneg.ll

Lines changed: 9 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -205,21 +205,17 @@ define <4 x float> @fneg(<4 x float> %Q) nounwind {
205205
ret <4 x float> %tmp
206206
}
207207

208-
; TODO: store(fneg(load())) - convert scalar to integer
208+
; store(fneg(load())) - convert scalar to integer
209209
define void @fneg_int_rmw_f32(ptr %ptr) {
210210
; X86-SSE-LABEL: fneg_int_rmw_f32:
211211
; X86-SSE: # %bb.0:
212212
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
213-
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
214-
; X86-SSE-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
215-
; X86-SSE-NEXT: movss %xmm0, (%eax)
213+
; X86-SSE-NEXT: xorb $-128, 3(%eax)
216214
; X86-SSE-NEXT: retl
217215
;
218216
; X64-SSE-LABEL: fneg_int_rmw_f32:
219217
; X64-SSE: # %bb.0:
220-
; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
221-
; X64-SSE-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
222-
; X64-SSE-NEXT: movss %xmm0, (%rdi)
218+
; X64-SSE-NEXT: xorb $-128, 3(%rdi)
223219
; X64-SSE-NEXT: retq
224220
%1 = load float, ptr %ptr
225221
%2 = fneg float %1
@@ -246,19 +242,12 @@ define void @fneg_int_f64(ptr %src, ptr %dst) {
246242
; X86-SSE2-NEXT: movlps %xmm0, (%eax)
247243
; X86-SSE2-NEXT: retl
248244
;
249-
; X64-SSE1-LABEL: fneg_int_f64:
250-
; X64-SSE1: # %bb.0:
251-
; X64-SSE1-NEXT: fldl (%rdi)
252-
; X64-SSE1-NEXT: fchs
253-
; X64-SSE1-NEXT: fstpl (%rsi)
254-
; X64-SSE1-NEXT: retq
255-
;
256-
; X64-SSE2-LABEL: fneg_int_f64:
257-
; X64-SSE2: # %bb.0:
258-
; X64-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
259-
; X64-SSE2-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
260-
; X64-SSE2-NEXT: movlps %xmm0, (%rsi)
261-
; X64-SSE2-NEXT: retq
245+
; X64-SSE-LABEL: fneg_int_f64:
246+
; X64-SSE: # %bb.0:
247+
; X64-SSE-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
248+
; X64-SSE-NEXT: xorq (%rdi), %rax
249+
; X64-SSE-NEXT: movq %rax, (%rsi)
250+
; X64-SSE-NEXT: retq
262251
%1 = load double, ptr %src
263252
%2 = fneg double %1
264253
store double %2, ptr %dst

llvm/test/CodeGen/X86/fsxor-alignment.ll

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,16 @@
88
define void @foo(ptr %p, ptr %q, float %s, float %y) nounwind {
99
; CHECK-LABEL: foo:
1010
; CHECK: # %bb.0:
11+
; CHECK-NEXT: pushl %esi
1112
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
1213
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
13-
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
14-
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
15-
; CHECK-NEXT: xorps %xmm1, %xmm0
16-
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
17-
; CHECK-NEXT: xorps %xmm1, %xmm2
18-
; CHECK-NEXT: movss %xmm0, (%ecx)
19-
; CHECK-NEXT: movss %xmm2, (%eax)
14+
; CHECK-NEXT: movl $-2147483648, %edx # imm = 0x80000000
15+
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
16+
; CHECK-NEXT: xorl %edx, %esi
17+
; CHECK-NEXT: movl %esi, (%ecx)
18+
; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %edx
19+
; CHECK-NEXT: movl %edx, (%eax)
20+
; CHECK-NEXT: popl %esi
2021
; CHECK-NEXT: retl
2122
%ss = fsub float -0.0, %s
2223
%yy = fsub float -0.0, %y

0 commit comments

Comments
 (0)