-
Notifications
You must be signed in to change notification settings - Fork 15.2k
release/21.x: [SPARC] Use FMA instructions when we have UA2007 (#148434) #154003
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
|
@arsenm What do you think about merging this PR to the release branch? |
|
@llvm/pr-subscribers-backend-sparc Author: None (llvmbot) Changes: Backport 111219e. Requested by: @brad0. Full diff: https://github.com/llvm/llvm-project/pull/154003.diff — 4 Files Affected:
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 1aa8efe3e9979..f81fdf0ccc82b 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1799,12 +1799,14 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
- setOperationAction(ISD::FMA , MVT::f64, Expand);
+ setOperationAction(ISD::FMA, MVT::f64,
+ Subtarget->isUA2007() ? Legal : Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
- setOperationAction(ISD::FMA, MVT::f32, Expand);
+ setOperationAction(ISD::FMA, MVT::f32,
+ Subtarget->isUA2007() ? Legal : Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
@@ -3550,6 +3552,11 @@ bool SparcTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
return isCheapToSpeculateCtlz(Ty);
}
+bool SparcTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
+ EVT VT) const {
+ return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
+}
+
// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
if (!Subtarget->isTargetLinux())
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 0d220f8c3d32e..4017beb88ff31 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -177,6 +177,11 @@ namespace llvm {
bool isCheapToSpeculateCttz(Type *Ty) const override;
+ bool enableAggressiveFMAFusion(EVT VT) const override { return true; };
+
+ bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
+ EVT VT) const override;
+
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
// FIXME: We insert fences for each atomics and generate
// sub-optimal code for PSO/TSO. (Approximately nobody uses any
diff --git a/llvm/lib/Target/Sparc/SparcInstrUAOSA.td b/llvm/lib/Target/Sparc/SparcInstrUAOSA.td
index 3a30e552e6db1..ffd4423137e3f 100644
--- a/llvm/lib/Target/Sparc/SparcInstrUAOSA.td
+++ b/llvm/lib/Target/Sparc/SparcInstrUAOSA.td
@@ -66,3 +66,15 @@ defm CXBCOND : F2_56<"cxb", 1>;
def FPMADDX : FourOp<"fpmaddx", 0b110111, 0b0000, DFPRegs>;
def FPMADDXHI : FourOp<"fpmaddxhi", 0b110111, 0b0100, DFPRegs>;
} // Predicates = [HasOSA2011]
+
+// UA2007 instruction patterns.
+let Predicates = [HasUA2007] in {
+def : Pat<(f32 (any_fma f32:$rs1, f32:$rs2, f32:$add)), (FMADDS $rs1, $rs2, $add)>;
+def : Pat<(f64 (any_fma f64:$rs1, f64:$rs2, f64:$add)), (FMADDD $rs1, $rs2, $add)>;
+def : Pat<(f32 (any_fma f32:$rs1, f32:$rs2, (fneg f32:$sub))), (FMSUBS $rs1, $rs2, $sub)>;
+def : Pat<(f64 (any_fma f64:$rs1, f64:$rs2, (fneg f64:$sub))), (FMSUBD $rs1, $rs2, $sub)>;
+def : Pat<(f32 (fneg (any_fma f32:$rs1, f32:$rs2, f32:$add))), (FNMADDS $rs1, $rs2, $add)>;
+def : Pat<(f64 (fneg (any_fma f64:$rs1, f64:$rs2, f64:$add))), (FNMADDD $rs1, $rs2, $add)>;
+def : Pat<(f32 (fneg (any_fma f32:$rs1, f32:$rs2, (fneg f32:$sub)))), (FNMSUBS $rs1, $rs2, $sub)>;
+def : Pat<(f64 (fneg (any_fma f64:$rs1, f64:$rs2, (fneg f64:$sub)))), (FNMSUBD $rs1, $rs2, $sub)>;
+} // Predicates = [HasUA2007]
diff --git a/llvm/test/CodeGen/SPARC/float-ua2007.ll b/llvm/test/CodeGen/SPARC/float-ua2007.ll
new file mode 100644
index 0000000000000..252b47943fe43
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/float-ua2007.ll
@@ -0,0 +1,275 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=sparc64 --fp-contract=fast -mattr=-ua2007 < %s | FileCheck %s -check-prefix=NO-UA2007
+; RUN: llc -mtriple=sparc64 --fp-contract=fast -mattr=+ua2007 < %s | FileCheck %s -check-prefix=UA2007
+
+define float @fmadds(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: fmadds:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fadds %f0, %f5, %f0
+;
+; UA2007-LABEL: fmadds:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmadds %f1, %f3, %f5, %f0
+ %ret = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ ret float %ret
+}
+
+define double @fmaddd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: fmaddd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: faddd %f0, %f4, %f0
+;
+; UA2007-LABEL: fmaddd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmaddd %f0, %f2, %f4, %f0
+ %ret = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ ret double %ret
+}
+
+define float @fmsubs(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: fmsubs:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fsubs %f0, %f5, %f0
+;
+; UA2007-LABEL: fmsubs:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmsubs %f1, %f3, %f5, %f0
+ %neg = fneg float %c
+ %ret = call float @llvm.fmuladd.f32(float %a, float %b, float %neg)
+ ret float %ret
+}
+
+define double @fmsubd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: fmsubd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fsubd %f0, %f4, %f0
+;
+; UA2007-LABEL: fmsubd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmsubd %f0, %f2, %f4, %f0
+ %neg = fneg double %c
+ %ret = call double @llvm.fmuladd.f64(double %a, double %b, double %neg)
+ ret double %ret
+}
+
+define float @fnmadds(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: fnmadds:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: fadds %f0, %f5, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegs %f0, %f0
+;
+; UA2007-LABEL: fnmadds:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmadds %f1, %f3, %f5, %f0
+ %fma = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ %ret = fneg float %fma
+ ret float %ret
+}
+
+define double @fnmaddd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: fnmaddd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: faddd %f0, %f4, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegd %f0, %f0
+;
+; UA2007-LABEL: fnmaddd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmaddd %f0, %f2, %f4, %f0
+ %fma = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ %ret = fneg double %fma
+ ret double %ret
+}
+
+define float @fnmsubs(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: fnmsubs:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: fsubs %f0, %f5, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegs %f0, %f0
+;
+; UA2007-LABEL: fnmsubs:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmsubs %f1, %f3, %f5, %f0
+ %neg = fneg float %c
+ %fma = call float @llvm.fmuladd.f32(float %a, float %b, float %neg)
+ %ret = fneg float %fma
+ ret float %ret
+}
+
+define double @fnmsubd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: fnmsubd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: fsubd %f0, %f4, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegd %f0, %f0
+;
+; UA2007-LABEL: fnmsubd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmsubd %f0, %f2, %f4, %f0
+ %neg = fneg double %c
+ %fma = call double @llvm.fmuladd.f64(double %a, double %b, double %neg)
+ %ret = fneg double %fma
+ ret double %ret
+}
+
+
+define float @combine_madds(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: combine_madds:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fadds %f0, %f5, %f0
+;
+; UA2007-LABEL: combine_madds:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmadds %f1, %f3, %f5, %f0
+ %mul = fmul float %a, %b
+ %add = fadd float %mul, %c
+ ret float %add
+}
+
+define double @combine_maddd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: combine_maddd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: faddd %f0, %f4, %f0
+;
+; UA2007-LABEL: combine_maddd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmaddd %f0, %f2, %f4, %f0
+ %mul = fmul double %a, %b
+ %add = fadd double %mul, %c
+ ret double %add
+}
+
+define float @combine_msubs(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: combine_msubs:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fsubs %f0, %f5, %f0
+;
+; UA2007-LABEL: combine_msubs:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmsubs %f1, %f3, %f5, %f0
+ %mul = fmul float %a, %b
+ %sub = fsub float %mul, %c
+ ret float %sub
+}
+
+define double @combine_msubd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: combine_msubd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fsubd %f0, %f4, %f0
+;
+; UA2007-LABEL: combine_msubd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fmsubd %f0, %f2, %f4, %f0
+ %mul = fmul double %a, %b
+ %sub = fsub double %mul, %c
+ ret double %sub
+}
+
+define float @combine_nmadds(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: combine_nmadds:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: fadds %f0, %f5, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegs %f0, %f0
+;
+; UA2007-LABEL: combine_nmadds:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmadds %f1, %f3, %f5, %f0
+ %mul = fmul float %a, %b
+ %add = fadd float %mul, %c
+ %neg = fneg float %add
+ ret float %neg
+}
+
+define double @combine_nmaddd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: combine_nmaddd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: faddd %f0, %f4, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegd %f0, %f0
+;
+; UA2007-LABEL: combine_nmaddd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmaddd %f0, %f2, %f4, %f0
+ %mul = fmul double %a, %b
+ %add = fadd double %mul, %c
+ %neg = fneg double %add
+ ret double %neg
+}
+
+define float @combine_nmsubs(float %a, float %b, float %c) nounwind {
+; NO-UA2007-LABEL: combine_nmsubs:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuls %f1, %f3, %f0
+; NO-UA2007-NEXT: fsubs %f0, %f5, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegs %f0, %f0
+;
+; UA2007-LABEL: combine_nmsubs:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmsubs %f1, %f3, %f5, %f0
+ %mul = fmul float %a, %b
+ %sub = fsub float %mul, %c
+ %neg = fneg float %sub
+ ret float %neg
+}
+
+define double @combine_nmsubd(double %a, double %b, double %c) nounwind {
+; NO-UA2007-LABEL: combine_nmsubd:
+; NO-UA2007: ! %bb.0:
+; NO-UA2007-NEXT: fmuld %f0, %f2, %f0
+; NO-UA2007-NEXT: fsubd %f0, %f4, %f0
+; NO-UA2007-NEXT: retl
+; NO-UA2007-NEXT: fnegd %f0, %f0
+;
+; UA2007-LABEL: combine_nmsubd:
+; UA2007: ! %bb.0:
+; UA2007-NEXT: retl
+; UA2007-NEXT: fnmsubd %f0, %f2, %f4, %f0
+ %mul = fmul double %a, %b
+ %sub = fsub double %mul, %c
+ %neg = fneg double %sub
+ ret double %neg
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+declare double @llvm.fmuladd.f64(double, double, double)
|
|
@brad0 (or anyone else). If you would like to add a note about this fix in the release notes (completely optional). Please reply to this comment with a one or two sentence description of the fix. When you are done, please add the release:note label to this PR. |
(cherry picked from commit 111219e)
Backport 111219e
Requested by: @brad0