diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c72a016c5333b..76e9900a06a1e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -985,6 +985,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   static const unsigned ZvfhminZvfbfminPromoteOps[] = {
       ISD::FMINNUM,
       ISD::FMAXNUM,
+      ISD::FMINIMUMNUM,
+      ISD::FMAXIMUMNUM,
       ISD::FADD,
       ISD::FSUB,
       ISD::FMUL,
@@ -1053,7 +1055,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       // Expand various condition codes (explained above).
       setCondCodeAction(VFPCCToExpand, VT, Expand);

-      setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
+      setOperationAction(
+          {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM, ISD::FMINIMUMNUM}, VT,
+          Legal);
       setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, VT, Custom);

       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
@@ -1471,7 +1475,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                           ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
                           ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM,
-                          ISD::IS_FPCLASS, ISD::FMAXIMUM, ISD::FMINIMUM},
+                          ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM, ISD::IS_FPCLASS,
+                          ISD::FMAXIMUM, ISD::FMINIMUM},
                          VT, Custom);

       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
@@ -6941,9 +6946,11 @@ static unsigned getRISCVVLOp(SDValue Op) {
   case ISD::VP_FP_TO_UINT:
     return RISCVISD::VFCVT_RTZ_XU_F_VL;
   case ISD::FMINNUM:
+  case ISD::FMINIMUMNUM:
   case ISD::VP_FMINNUM:
     return RISCVISD::VFMIN_VL;
   case ISD::FMAXNUM:
+  case ISD::FMAXIMUMNUM:
   case ISD::VP_FMAXNUM:
     return RISCVISD::VFMAX_VL;
   case ISD::LRINT:
@@ -7979,6 +7986,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::FMA:
   case ISD::FMINNUM:
   case ISD::FMAXNUM:
+  case ISD::FMINIMUMNUM:
+  case ISD::FMAXIMUMNUM:
     if (isPromotedOpNeedingSplit(Op, Subtarget))
       return SplitVectorOp(Op, DAG);
     [[fallthrough]];
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index aea125c5348dd..93228f2a9e167 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1360,6 +1360,8 @@ foreach vti = AllFloatVectors in {
 // 13.11. Vector Floating-Point MIN/MAX Instructions
 defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
 defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;
+defm : VPatBinaryFPSDNode_VV_VF<fminimumnum, "PseudoVFMIN">;
+defm : VPatBinaryFPSDNode_VV_VF<fmaximumnum, "PseudoVFMAX">;
 // 13.13. Vector Floating-Point Compare Instructions
 defm : VPatFPSetCCSDNode_VV_VF_FV;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximumnum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximumnum.ll
new file mode 100644
index 0000000000000..c8cea368f905e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximumnum.ll
@@ -0,0 +1,201 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=riscv64-linux-gnu --mattr=+v,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc --mtriple=riscv64-linux-gnu --mattr=+v,+zvfhmin,+zfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <2 x double> @max_v2f64(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: max_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %c
+}
+
+define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
+; CHECK-LABEL: max_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <3 x double> @llvm.maximumnum.v3f64(<3 x double> %a, <3 x double> %b)
+  ret <3 x double> %c
+}
+
+define <4 x double> @max_v4f64(<4 x double> %a, <4 x double> %b) {
+; CHECK-LABEL: max_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> %a, <4 x double> %b)
+  ret <4 x double> %c
+}
+
+define <2 x float> @max_v2f32(<2 x float> %a, <2 x float> %b) {
+; CHECK-LABEL: max_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %c
+}
+
+define <3 x float> @max_v3f32(<3 x float> %a, <3 x float> %b) {
+; CHECK-LABEL: max_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <3 x float> @llvm.maximumnum.v3f32(<3 x float> %a, <3 x float> %b)
+  ret <3 x float> %c
+}
+
+define <4 x float> @max_v4f32(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: max_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %c
+}
+
+define <5 x float> @max_v5f32(<5 x float> %a, <5 x float> %b) {
+; CHECK-LABEL: max_v5f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <5 x float> @llvm.maximumnum.v5f32(<5 x float> %a, <5 x float> %b)
+  ret <5 x float> %c
+}
+
+define <8 x float> @max_v8f32(<8 x float> %a, <8 x float> %b) {
+; CHECK-LABEL: max_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <8 x float> @llvm.maximumnum.v8f32(<8 x float> %a, <8 x float> %b)
+  ret <8 x float> %c
+}
+
+define <2 x half> @max_v2f16(<2 x half> %a, <2 x half> %b) {
+; ZVFH-LABEL: max_v2f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: max_v2f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b)
+  ret <2 x half> %c
+}
+
+define <4 x half> @max_v4f16(<4 x half> %a, <4 x half> %b) {
+; ZVFH-LABEL: max_v4f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: max_v4f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <4 x half> @llvm.maximumnum.v4f16(<4 x half> %a, <4 x half> %b)
+  ret <4 x half> %c
+}
+
+define <8 x half> @max_v8f16(<8 x half> %a, <8 x half> %b) {
+; ZVFH-LABEL: max_v8f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: max_v8f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> %a, <8 x half> %b)
+  ret <8 x half> %c
+}
+
+define <9 x half> @max_v9f16(<9 x half> %a, <9 x half> %b) {
+; ZVFH-LABEL: max_v9f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: max_v9f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <9 x half> @llvm.maximumnum.v9f16(<9 x half> %a, <9 x half> %b)
+  ret <9 x half> %c
+}
+
+define <16 x half> @max_v16f16(<16 x half> %a, <16 x half> %b) {
+; ZVFH-LABEL: max_v16f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: max_v16f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <16 x half> @llvm.maximumnum.v16f16(<16 x half> %a, <16 x half> %b)
+  ret <16 x half> %c
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimumnum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimumnum.ll
new file mode 100644
index 0000000000000..36114d56aa0d6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimumnum.ll
@@ -0,0 +1,201 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=riscv64-linux-gnu --mattr=+v,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc --mtriple=riscv64-linux-gnu --mattr=+v,+zvfhmin,+zfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <2 x double> @min_v2f64(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: min_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %c
+}
+
+define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
+; CHECK-LABEL: min_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <3 x double> @llvm.minimumnum.v3f64(<3 x double> %a, <3 x double> %b)
+  ret <3 x double> %c
+}
+
+define <4 x double> @min_v4f64(<4 x double> %a, <4 x double> %b) {
+; CHECK-LABEL: min_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> %a, <4 x double> %b)
+  ret <4 x double> %c
+}
+
+define <2 x float> @min_v2f32(<2 x float> %a, <2 x float> %b) {
+; CHECK-LABEL: min_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %c
+}
+
+define <3 x float> @min_v3f32(<3 x float> %a, <3 x float> %b) {
+; CHECK-LABEL: min_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <3 x float> @llvm.minimumnum.v3f32(<3 x float> %a, <3 x float> %b)
+  ret <3 x float> %c
+}
+
+define <4 x float> @min_v4f32(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: min_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %c = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %c
+}
+
+define <5 x float> @min_v5f32(<5 x float> %a, <5 x float> %b) {
+; CHECK-LABEL: min_v5f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <5 x float> @llvm.minimumnum.v5f32(<5 x float> %a, <5 x float> %b)
+  ret <5 x float> %c
+}
+
+define <8 x float> @min_v8f32(<8 x float> %a, <8 x float> %b) {
+; CHECK-LABEL: min_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+  %c = call <8 x float> @llvm.minimumnum.v8f32(<8 x float> %a, <8 x float> %b)
+  ret <8 x float> %c
+}
+
+define <2 x half> @min_v2f16(<2 x half> %a, <2 x half> %b) {
+; ZVFH-LABEL: min_v2f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: min_v2f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b)
+  ret <2 x half> %c
+}
+
+define <4 x half> @min_v4f16(<4 x half> %a, <4 x half> %b) {
+; ZVFH-LABEL: min_v4f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: min_v4f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <4 x half> @llvm.minimumnum.v4f16(<4 x half> %a, <4 x half> %b)
+  ret <4 x half> %c
+}
+
+define <8 x half> @min_v8f16(<8 x half> %a, <8 x half> %b) {
+; ZVFH-LABEL: min_v8f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: min_v8f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> %a, <8 x half> %b)
+  ret <8 x half> %c
+}
+
+define <9 x half> @min_v9f16(<9 x half> %a, <9 x half> %b) {
+; ZVFH-LABEL: min_v9f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: min_v9f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <9 x half> @llvm.minimumnum.v9f16(<9 x half> %a, <9 x half> %b)
+  ret <9 x half> %c
+}
+
+define <16 x half> @min_v16f16(<16 x half> %a, <16 x half> %b) {
+; ZVFH-LABEL: min_v16f16:
+; ZVFH: # %bb.0: # %entry
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: min_v16f16:
+; ZVFHMIN: # %bb.0: # %entry
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+entry:
+  %c = call <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
+  ret <16 x half> %c
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximumnum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximumnum-sdnode.ll
new file mode 100644
index 0000000000000..dce5004d03e16
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximumnum-sdnode.ll
@@ -0,0 +1,853 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x bfloat> @vfmax_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfmax.vv v9, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 1 x bfloat> @llvm.maximumnum.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb)
+  ret <vscale x 1 x bfloat> %vc
+}
+
+define <vscale x 1 x bfloat> @vfmax_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_vf_nxv1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfmax.vf v9, v9, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x bfloat> @llvm.maximumnum.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %splat)
+  ret <vscale x 1 x bfloat> %vc
+}
+
+define <vscale x 2 x bfloat> @vfmax_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v9, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 2 x bfloat> @llvm.maximumnum.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb)
+  ret <vscale x 2 x bfloat> %vc
+}
+
+define <vscale x 2 x bfloat> @vfmax_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_vf_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmax.vf v9, v9, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x bfloat> @llvm.maximumnum.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %splat)
+  ret <vscale x 2 x bfloat> %vc
+}
+
+define <vscale x 4 x bfloat> @vfmax_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v10, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: ret
+  %vc = call <vscale x 4 x bfloat> @llvm.maximumnum.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb)
+  ret <vscale x 4 x bfloat> %vc
+}
+
+define <vscale x 4 x bfloat> @vfmax_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_vf_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmax.vf v10, v10, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x bfloat> @llvm.maximumnum.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %splat)
+  ret <vscale x 4 x bfloat> %vc
+}
+
+define <vscale x 8 x bfloat> @vfmax_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmax.vv v12, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT: ret
+  %vc = call <vscale x 8 x bfloat> @llvm.maximumnum.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb)
+  ret <vscale x 8 x bfloat> %vc
+}
+
+define <vscale x 8 x bfloat> @vfmax_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_vf_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v12, v12, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x bfloat> @llvm.maximumnum.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %splat)
+  ret <vscale x 8 x bfloat> %vc
+}
+
+define <vscale x 8 x bfloat> @vfmax_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_fv_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v12, v12, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x bfloat> @llvm.maximumnum.nxv8bf16(<vscale x 8 x bfloat> %splat, <vscale x 8 x bfloat> %va)
+  ret <vscale x 8 x bfloat> %vc
+}
+
+define <vscale x 16 x bfloat> @vfmax_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v16, v24, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT: ret
+  %vc = call <vscale x 16 x bfloat> @llvm.maximumnum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb)
+  ret <vscale x 16 x bfloat> %vc
+}
+
+define <vscale x 16 x bfloat> @vfmax_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_vf_nxv16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vf v16, v16, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x bfloat> @llvm.maximumnum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %splat)
+  ret <vscale x 16 x bfloat> %vc
+}
+
+define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv32bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v0, v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v16, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+  %vc = call <vscale x 32 x bfloat> @llvm.maximumnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb)
+  ret <vscale x 32 x bfloat> %vc
+}
+
+define <vscale x 32 x bfloat> @vfmax_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmax_vf_nxv32bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: fmv.x.h a0, fa0
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v0, v8, v0
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v16, v24, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x bfloat> @llvm.maximumnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %splat)
+  ret <vscale x 32 x bfloat> %vc
+}
+
+define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; ZVFH-LABEL: vfmax_vv_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vv_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 1 x half> @llvm.maximumnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb)
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfmax_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_vf_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vf_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmax.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x half> @llvm.maximumnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %splat)
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; ZVFH-LABEL: vfmax_vv_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vv_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 2 x half> @llvm.maximumnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb)
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmax_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_vf_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vf_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmax.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x half> @llvm.maximumnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %splat)
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; ZVFH-LABEL: vfmax_vv_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vv_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 4 x half> @llvm.maximumnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb)
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmax_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_vf_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vf_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmax.vf v10, v10, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x half> @llvm.maximumnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %splat)
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; ZVFH-LABEL: vfmax_vv_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vv_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb)
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmax_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_vf_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vf_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmax.vf v12, v12, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %splat)
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmax_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_fv_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_fv_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmax.vf v12, v12, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> %splat, <vscale x 8 x half> %va)
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; ZVFH-LABEL: vfmax_vv_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vv_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 16 x half> @llvm.maximumnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb)
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmax_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_vf_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vf_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmax.vf v16, v16, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x half> @llvm.maximumnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %splat)
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; ZVFH-LABEL: vfmax_vv_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfmax.vv v8, v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vv_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v0, v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 32 x half> @llvm.maximumnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb)
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmax_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; ZVFH-LABEL: vfmax_vf_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfmax.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmax_vf_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a0, fa0
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v8, a0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v0, v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.maximumnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat)
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfmax_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 1 x float> @llvm.maximumnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb)
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfmax_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfmax_vf_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.maximumnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat)
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmax_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 2 x float> @llvm.maximumnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb)
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmax_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfmax_vf_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.maximumnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat)
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmax_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+  %vc = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb)
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmax_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfmax_vf_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat)
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmax_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v12
+; CHECK-NEXT: ret
+  %vc = call <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmax_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmax_vf_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmax_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmax_fv_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %splat, <vscale x 8 x float> %va)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmax_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: ret
+  %vc = call <vscale x 16 x float> @llvm.maximumnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb)
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmax_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfmax_vf_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.maximumnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat)
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfmax_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 1 x double> @llvm.maximumnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb)
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfmax_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfmax_vf_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.maximumnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat)
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmax_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+  %vc = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb)
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmax_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfmax_vf_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat)
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmax_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v12
+; CHECK-NEXT: ret
+  %vc = call <vscale x 4 x double> @llvm.maximumnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb)
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmax_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfmax_vf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.maximumnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat)
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfmax_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: ret
+  %vc = call <vscale x 8 x double> @llvm.maximumnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb)
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmax_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmax_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.maximumnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat)
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmax_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmax_fv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.maximumnum.nxv8f64(<vscale x 8 x double> %splat, <vscale x 8 x double> %va)
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x float> @vfmax_vv_mask_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: vfmax_vv_mask_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vvm v12, v16, v12, v0
+; CHECK-NEXT: vfmax.vv v8, v8, v12
+; CHECK-NEXT: ret
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> splat (float 0.0)
+  %vc = call fast <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vs)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmax_vf_mask_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: vfmax_vf_mask_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vfmerge.vfm v12, v12, fa0, v0
+; CHECK-NEXT: vfmax.vv v8, v8, v12
+; CHECK-NEXT: ret
+  %head1 = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat1 = shufflevector <vscale x 8 x float> %head1, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> splat (float 0.0)
+  %vc = call fast <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vs)
+  ret <vscale x 8 x float> %vc
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimumnum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimumnum-sdnode.ll
new file mode 100644
index 0000000000000..fcb8ad82342d5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimumnum-sdnode.ll
@@ -0,0 +1,853 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x bfloat> @vfmin_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb) {
+; CHECK-LABEL: vfmin_vv_nxv1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfmin.vv v9, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 1 x bfloat> @llvm.minimumnum.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb)
+  ret <vscale x 1 x bfloat> %vc
+}
+
+define <vscale x 1 x bfloat> @vfmin_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_vf_nxv1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfmin.vf v9, v9, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x bfloat> @llvm.minimumnum.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %splat)
+  ret <vscale x 1 x bfloat> %vc
+}
+
+define <vscale x 2 x bfloat> @vfmin_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb) {
+; CHECK-LABEL: vfmin_vv_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmin.vv v9, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %vc = call <vscale x 2 x bfloat> @llvm.minimumnum.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb)
+  ret <vscale x 2 x bfloat> %vc
+}
+
+define <vscale x 2 x bfloat> @vfmin_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_vf_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmin.vf v9, v9, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x bfloat> @llvm.minimumnum.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %splat)
+  ret <vscale x 2 x bfloat> %vc
+}
+
+define <vscale x 4 x bfloat> @vfmin_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb) {
+; CHECK-LABEL: vfmin_vv_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v10, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: ret
+  %vc = call <vscale x 4 x bfloat> @llvm.minimumnum.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb)
+  ret <vscale x 4 x bfloat> %vc
+}
+
+define <vscale x 4 x bfloat> @vfmin_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_vf_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmin.vf v10, v10, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x bfloat> @llvm.minimumnum.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %splat)
+  ret <vscale x 4 x bfloat> %vc
+}
+
+define <vscale x 8 x bfloat> @vfmin_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
+; CHECK-LABEL: vfmin_vv_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmin.vv v12, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT: ret
+  %vc = call <vscale x 8 x bfloat> @llvm.minimumnum.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb)
+  ret <vscale x 8 x bfloat> %vc
+}
+
+define <vscale x 8 x bfloat> @vfmin_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_vf_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmin.vf v12, v12, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x bfloat> @llvm.minimumnum.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %splat)
+  ret <vscale x 8 x bfloat> %vc
+}
+
+define <vscale x 8 x bfloat> @vfmin_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_fv_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmin.vf v12, v12, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x bfloat> @llvm.minimumnum.nxv8bf16(<vscale x 8 x bfloat> %splat, <vscale x 8 x bfloat> %va)
+  ret <vscale x 8 x bfloat> %vc
+}
+
+define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb) {
+; CHECK-LABEL: vfmin_vv_nxv16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v16, v24, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT: ret
+  %vc = call <vscale x 16 x bfloat> @llvm.minimumnum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb)
+  ret <vscale x 16 x bfloat> %vc
+}
+
+define <vscale x 16 x bfloat> @vfmin_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_vf_nxv16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmin.vf v16, v16, fa5
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x bfloat> @llvm.minimumnum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %splat)
+  ret <vscale x 16 x bfloat> %vc
+}
+
+define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb) {
+; CHECK-LABEL: vfmin_vv_nxv32bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v0, v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v16, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+  %vc = call <vscale x 32 x bfloat> @llvm.minimumnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb)
+  ret <vscale x 32 x bfloat> %vc
+}
+
+define <vscale x 32 x bfloat> @vfmin_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b) {
+; CHECK-LABEL: vfmin_vf_nxv32bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: fmv.x.h a0, fa0
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v0, v8, v0
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v16, v24, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
+  %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x bfloat> @llvm.minimumnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %splat)
+  ret <vscale x 32 x bfloat> %vc
+}
+
+define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; ZVFH-LABEL: vfmin_vv_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmin_vv_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 1 x half> @llvm.minimumnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb)
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfmin_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; ZVFH-LABEL: vfmin_vf_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmin.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmin_vf_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmin.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x half> @llvm.minimumnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %splat)
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; ZVFH-LABEL: vfmin_vv_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmin_vv_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 2 x half> @llvm.minimumnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb)
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmin_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; ZVFH-LABEL: vfmin_vf_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmin.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmin_vf_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmin.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x half> @llvm.minimumnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %splat)
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; ZVFH-LABEL: vfmin_vv_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfmin.vv v8, v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmin_vv_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmin.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+  %vc = call <vscale x 4 x half> @llvm.minimumnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb)
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmin_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; ZVFH-LABEL: vfmin_vf_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfmin.vf v8, v8, fa0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmin_vf_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
vfmax.vf v10, v10, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.maximumnum.nxv4f16( %va, %splat) + ret %vc +} + +define @vfadd_vv_nxv8f16( %va, %vb) { +; ZVFH-LABEL: vfadd_vv_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfmax.vv v8, v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmax.vv v12, v16, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret + %vc = call @llvm.maximumnum.nxv8f16( %va, %vb) + ret %vc +} + +define @vfadd_vf_nxv8f16( %va, half %b) { +; ZVFH-LABEL: vfadd_vf_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfmax.vf v8, v8, fa0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmax.vf v12, v12, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.maximumnum.nxv8f16( %va, %splat) + ret %vc +} + +define @vfadd_fv_nxv8f16( %va, half %b) { +; ZVFH-LABEL: vfadd_fv_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfmax.vf v8, v8, fa0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_fv_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmax.vf v12, v12, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.maximumnum.nxv8f16( %splat, %va) + ret %vc +} + +define @vfadd_vv_nxv16f16( %va, %vb) { +; ZVFH-LABEL: vfadd_vv_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfmax.vv v8, v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret + %vc = call @llvm.maximumnum.nxv16f16( %va, %vb) + ret %vc +} + +define @vfadd_vf_nxv16f16( %va, half %b) { +; ZVFH-LABEL: vfadd_vf_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfmax.vf v8, v8, fa0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; 
ZVFHMIN-NEXT: vfmax.vf v16, v16, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.maximumnum.nxv16f16( %va, %splat) + ret %vc +} + +define @vfadd_vv_nxv32f16( %va, %vb) { +; ZVFH-LABEL: vfadd_vv_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; ZVFH-NEXT: vfmax.vv v8, v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: sub sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmax.vv v0, v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret + %vc = call @llvm.maximumnum.nxv32f16( %va, %vb) + ret %vc +} + +define @vfadd_vf_nxv32f16( %va, half %b) { +; ZVFH-LABEL: vfadd_vf_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; ZVFH-NEXT: vfmax.vf v8, v8, fa0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: sub sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a0, fa0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v8, a0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmax.vv v0, v8, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; 
+; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.maximumnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat)
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 1 x float> @llvm.maximumnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb)
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.maximumnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat)
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 2 x float> @llvm.maximumnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb)
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.maximumnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat)
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb)
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat)
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %splat, <vscale x 8 x float> %va)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 16 x float> @llvm.maximumnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb)
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.maximumnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat)
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 1 x double> @llvm.maximumnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb)
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.maximumnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat)
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb)
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat)
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 4 x double> @llvm.maximumnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb)
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.maximumnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat)
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmax.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %vc = call <vscale x 8 x double> @llvm.maximumnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb)
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.maximumnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat)
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmax.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.maximumnum.nxv8f64(<vscale x 8 x double> %splat, <vscale x 8 x double> %va)
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vv_mask_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: vfadd_vv_mask_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
+; CHECK-NEXT:    vfmax.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> splat (float 0.0)
+  %vc = call fast <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vs)
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vf_mask_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: vfadd_vf_mask_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    vfmerge.vfm v12, v12, fa0, v0
+; CHECK-NEXT:    vfmax.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat1 = shufflevector <vscale x 8 x float> %head1, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> splat (float 0.0)
+  %vc = call fast <vscale x 8 x float> @llvm.maximumnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vs)
+  ret <vscale x 8 x float> %vc
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
index b97fa2499cfd5..1319454b7a1a1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
@@ -1,14 +1,62 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; FIXME: fmaximumnum/fminimumnum have no vectorizing support yet.
-; RUN: opt --passes=loop-vectorize --mtriple=riscv64 -mattr="+zvfh,+v,+zfh" -S < %s | FileCheck %s
+; RUN: opt --passes=loop-vectorize --mtriple=riscv64 -mattr="+v,+zvfh" -S < %s | FileCheck %s
+; RUN: opt --passes=loop-vectorize --mtriple=riscv64 -mattr="+v,+zvfhmin" -S < %s | FileCheck %s --check-prefix=ZVFHMIN
 
 define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
 ; CHECK-LABEL: define void @fmin32(
 ; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
+; CHECK-NEXT:    [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
+; CHECK-NEXT:    [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP13:%.*]] = mul i64 [[TMP8]], 4
+; CHECK-NEXT:    [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK:       [[VECTOR_MEMCHECK]]:
+; CHECK-NEXT:    [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP15]], 4
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
+; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]]
+; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
+; CHECK-NEXT:    [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]]
+; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; CHECK-NEXT:    br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP9]], 4
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i64 [[TMP19]], 4
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 0
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 0
+; CHECK-NEXT:    [[WIDE_LOAD5:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 0
+; CHECK-NEXT:    store <vscale x 4 x float> [[TMP17]], ptr [[TMP11]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK:       [[SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
 ; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
 ; CHECK:       [[FOR_BODY]]:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
@@ -18,10 +66,76 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
 ; CHECK-NEXT:    store float [[OUT]], ptr [[ARRAYIDX4]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
+; ZVFHMIN-LABEL: define void @fmin32(
+; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; ZVFHMIN-NEXT:  [[ENTRY:.*]]:
+; ZVFHMIN-NEXT:    [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
+; ZVFHMIN-NEXT:    [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
+; ZVFHMIN-NEXT:    [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
+; ZVFHMIN-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; ZVFHMIN-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; ZVFHMIN-NEXT:    [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
+; ZVFHMIN-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; ZVFHMIN:       [[VECTOR_MEMCHECK]]:
+; ZVFHMIN-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; ZVFHMIN-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 4
+; ZVFHMIN-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; ZVFHMIN-NEXT:    [[TMP6:%.*]] = sub i64 [[OUTPUT1]], 
[[INPUT12]] +; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; ZVFHMIN: [[VECTOR_PH]]: +; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4 +; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 4 +; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] +; ZVFHMIN: [[VECTOR_BODY]]: +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP14]], align 4 +; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw float, ptr [[TMP15]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP16]], align 4 +; ZVFHMIN-NEXT: [[TMP17:%.*]] = call @llvm.minimumnum.nxv4f32( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw float, ptr [[TMP18]], i32 0 +; ZVFHMIN-NEXT: store [[TMP17]], ptr [[TMP19]], align 4 +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; ZVFHMIN: [[MIDDLE_BLOCK]]: +; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN: [[SCALAR_PH]]: +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] +; ZVFHMIN: [[FOR_BODY]]: +; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call float @llvm.minimumnum.f32(float [[IN1]], float [[IN2]]) +; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4 +; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; ZVFHMIN: [[EXIT]]: +; ZVFHMIN-NEXT: 
ret void +; entry: br label %for.body @@ -48,9 +162,56 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-LABEL: define void @fmax32( ; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP8]], 4 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP15]], 4 +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 4 +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]] +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP9]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP19]], 4 +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP6]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = call @llvm.maximumnum.nxv4f32( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 0 +; CHECK-NEXT: store [[TMP17]], ptr [[TMP11]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, 
%[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]] ; CHECK-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]] @@ -60,10 +221,76 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmax32( +; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*]]: +; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] +; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN: [[VECTOR_MEMCHECK]]: +; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; ZVFHMIN: [[VECTOR_PH]]: +; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4 +; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 4 +; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] +; ZVFHMIN: [[VECTOR_BODY]]: +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP14]], align 4 +; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x 
float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw float, ptr [[TMP15]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP16]], align 4 +; ZVFHMIN-NEXT: [[TMP17:%.*]] = call @llvm.maximumnum.nxv4f32( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw float, ptr [[TMP18]], i32 0 +; ZVFHMIN-NEXT: store [[TMP17]], ptr [[TMP19]], align 4 +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; ZVFHMIN: [[MIDDLE_BLOCK]]: +; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN: [[SCALAR_PH]]: +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] +; ZVFHMIN: [[FOR_BODY]]: +; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call float @llvm.maximumnum.f32(float [[IN1]], float [[IN2]]) +; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4 +; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; ZVFHMIN: [[EXIT]]: +; ZVFHMIN-NEXT: ret void +; entry: br label %for.body @@ -90,9 +317,56 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-LABEL: define void @fmin64( ; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP8]], 2 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP15]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 8 +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]] +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8 +; CHECK-NEXT: 
[[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP9]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP19]], 2 +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP6]], align 8 +; CHECK-NEXT: [[TMP17:%.*]] = call @llvm.minimumnum.nxv2f64( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 0 +; CHECK-NEXT: store [[TMP17]], ptr [[TMP11]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]] ; CHECK-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]] @@ -102,10 +376,76 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmin64( +; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef 
writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*]]: +; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] +; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN: [[VECTOR_MEMCHECK]]: +; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 +; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8 +; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; ZVFHMIN: [[VECTOR_PH]]: +; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2 +; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] +; ZVFHMIN: [[VECTOR_BODY]]: +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw double, ptr [[TMP13]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP14]], align 8 +; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw double, ptr [[TMP15]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP16]], align 8 +; ZVFHMIN-NEXT: [[TMP17:%.*]] = call @llvm.minimumnum.nxv2f64( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw double, ptr [[TMP18]], i32 0 +; ZVFHMIN-NEXT: store [[TMP17]], ptr [[TMP19]], align 8 +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; ZVFHMIN: [[MIDDLE_BLOCK]]: +; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN: [[SCALAR_PH]]: +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] +; ZVFHMIN: [[FOR_BODY]]: +; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; ZVFHMIN-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8 +; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8 +; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call double @llvm.minimumnum.f64(double [[IN1]], double [[IN2]]) +; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8 +; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; ZVFHMIN: [[EXIT]]: +; ZVFHMIN-NEXT: ret void +; entry: br label %for.body @@ -132,9 +472,56 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-LABEL: define void @fmax64( ; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP8]], 2 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP15]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 8 +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]] +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP9]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP19]], 2 +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr 
[[TMP6]], align 8 +; CHECK-NEXT: [[TMP17:%.*]] = call @llvm.maximumnum.nxv2f64( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 0 +; CHECK-NEXT: store [[TMP17]], ptr [[TMP11]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]] ; CHECK-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]] @@ -144,10 +531,76 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmax64( +; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*]]: +; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] +; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN: [[VECTOR_MEMCHECK]]: +; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 +; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8 +; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label 
%[[VECTOR_PH:.*]] +; ZVFHMIN: [[VECTOR_PH]]: +; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2 +; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] +; ZVFHMIN: [[VECTOR_BODY]]: +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw double, ptr [[TMP13]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP14]], align 8 +; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw double, ptr [[TMP15]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP16]], align 8 +; ZVFHMIN-NEXT: [[TMP17:%.*]] = call @llvm.maximumnum.nxv2f64( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw double, ptr [[TMP18]], i32 0 +; ZVFHMIN-NEXT: store [[TMP17]], ptr [[TMP19]], align 8 +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; ZVFHMIN: [[MIDDLE_BLOCK]]: +; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN: [[SCALAR_PH]]: +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] +; ZVFHMIN: [[FOR_BODY]]: +; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8 +; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8 +; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call double @llvm.maximumnum.f64(double [[IN1]], double [[IN2]]) +; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8 +; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; ZVFHMIN: [[EXIT]]: +; ZVFHMIN-NEXT: ret void +; entry: br label %for.body @@ -174,9 +627,56 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-LABEL: define void @fmin16( ; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: +; 
CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP6]], 8 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 8 +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP16]], 2 +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP18]] +; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP16]], 2 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP19]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 8 +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP5]], align 2 +; CHECK-NEXT: [[TMP17:%.*]] = call @llvm.minimumnum.nxv8f16( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i32 0 +; CHECK-NEXT: store [[TMP17]], ptr [[TMP8]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 
0, i64 [[INDVARS_IV]] ; CHECK-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]] @@ -186,10 +686,76 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmin16( +; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*]]: +; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] +; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN: [[VECTOR_MEMCHECK]]: +; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 +; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 2 +; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; ZVFHMIN: [[VECTOR_PH]]: +; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8 +; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 8 +; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] +; ZVFHMIN: [[VECTOR_BODY]]: +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw half, ptr [[TMP13]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP14]], align 2 +; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw half, ptr [[TMP15]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP16]], align 2 +; ZVFHMIN-NEXT: [[TMP17:%.*]] = call @llvm.minimumnum.nxv8f16( [[WIDE_LOAD]], [[WIDE_LOAD5]]) +; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; 
ZVFHMIN-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw half, ptr [[TMP18]], i32 0 +; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP19]], align 2 +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; ZVFHMIN: [[MIDDLE_BLOCK]]: +; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN: [[SCALAR_PH]]: +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] +; ZVFHMIN: [[FOR_BODY]]: +; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2 +; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN2:%.*]] = load half, ptr [[ARRAYIDX2]], align 2 +; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call half @llvm.minimumnum.f16(half [[IN1]], half [[IN2]]) +; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 +; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; ZVFHMIN: [[EXIT]]: +; ZVFHMIN-NEXT: ret void +; entry: br label %for.body @@ -216,9 +782,56 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-LABEL: define void @fmax16( ; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP6]], 8 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 8 +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP16]], 2 +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP18]] +; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP16]], 2 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP19]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8 +;
CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 8 +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP3]], align 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP5]], align 2 +; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i32 0 +; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP8]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]] ; CHECK-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]] @@ -228,10 +841,76 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmax16( +; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*]]: +; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 +; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 +; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 +; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64
@llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] +; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN: [[VECTOR_MEMCHECK]]: +; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 +; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]] +; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 2 +; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]] +; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; ZVFHMIN: [[VECTOR_PH]]: +; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8 +; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] +; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] +; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 8 +; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] +; ZVFHMIN: [[VECTOR_BODY]]: +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw half, ptr [[TMP13]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP14]], align 2 +; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw half, ptr [[TMP15]], i32 0 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP16]], align 2 +; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]]) +; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw half, ptr [[TMP18]], i32 0 +; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP19]], align 2 +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; ZVFHMIN: [[MIDDLE_BLOCK]]: +; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] +; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN: [[SCALAR_PH]]: +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] +; ZVFHMIN: [[FOR_BODY]]: +; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2 +; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: [[IN2:%.*]] = load half, ptr [[ARRAYIDX2]], align 2 +; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call half @llvm.maximumnum.f16(half [[IN1]], half [[IN2]]) +;
ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[IV]] +; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 +; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; ZVFHMIN: [[EXIT]]: +; ZVFHMIN-NEXT: ret void +; entry: br label %for.body @@ -253,3 +932,34 @@ exit: } declare half @llvm.maximumnum.f16(half, half) +;. +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} +; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]} +;. +; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; ZVFHMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; ZVFHMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +; ZVFHMIN: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; ZVFHMIN: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]} +; ZVFHMIN: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; ZVFHMIN: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} +; ZVFHMIN: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; ZVFHMIN: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} +; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} +; ZVFHMIN: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} +; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} +; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]} +;. 
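Taken together, the autogenerated checks above pin down both halves of the new support: the vectorizer is free to form scalable-vector `llvm.minimumnum`/`llvm.maximumnum` calls, and the backend selects such calls straight to `vfmin.vv`/`vfmax.vv`, which in RVV implement IEEE 754-2019 minimumNumber/maximumNumber (a NaN operand loses to a numeric one). A minimal hand-written sketch of that contract, assuming `-mattr=+v,+zvfh`; the function name is illustrative and not taken from the patch:

; Sketch, not part of the patch: with this change, the call below is
; expected to select to a single vfmin.vv (plus the usual vsetvli).
; Lanes where exactly one input is NaN produce the numeric input's lane.
define <vscale x 8 x half> @sketch_minimumnum(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
  %r = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %r
}
declare <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)

Under `+zvfhmin`, where f16 arithmetic is unavailable, the f16 operation is instead promoted to f32 before selection; that is the configuration the ZVFHMIN run lines in these tests exercise.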
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/fminimumnum.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/fminimumnum.ll index 920abfad776e0..4d43f3f3e55f0 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/fminimumnum.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/fminimumnum.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt --passes=slp-vectorizer --mtriple=riscv64 -mattr="+zvfh,+v,+zfh" -S < %s | FileCheck %s +; RUN: opt --passes=slp-vectorizer --mtriple=riscv64 -mattr="+v,+zvfh" -S < %s | FileCheck %s +; RUN: opt --passes=slp-vectorizer --mtriple=riscv64 -mattr="+v,+zvfhmin" -S < %s | FileCheck %s --check-prefix=ZVFHMIN @input1_f32 = global [9 x float] zeroinitializer, align 16 @input2_f32 = global [9 x float] zeroinitializer, align 16 @@ -15,44 +16,29 @@ define void @fmin32() { ; CHECK-LABEL: define void @fmin32( ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr @input1_f32, align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @input2_f32, align 16 -; CHECK-NEXT: [[TMP2:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP0]], float [[TMP1]]) -; CHECK-NEXT: store float [[TMP2]], ptr @output_f32, align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 4), align 4 -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 4), align 4 -; CHECK-NEXT: [[TMP5:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP3]], float [[TMP4]]) -; CHECK-NEXT: store float [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 4), align 4 -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 8), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 8), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP6]], float [[TMP7]]) -; CHECK-NEXT: store float [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 8), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 12), align 4 -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 12), align 4 -; CHECK-NEXT: [[TMP11:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP9]], float [[TMP10]]) -; CHECK-NEXT: store float [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 12), align 4 -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 16), align 16 -; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 16), align 16 -; CHECK-NEXT: [[TMP14:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP12]], float [[TMP13]]) -; CHECK-NEXT: store float [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 16), align 16 -; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 20), align 4 -; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 20), align 4 -; CHECK-NEXT: [[TMP17:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP15]], float [[TMP16]]) -; CHECK-NEXT: store float [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 20), align 4 -; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 24), align 
8 -; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 24), align 8 -; CHECK-NEXT: [[TMP20:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP18]], float [[TMP19]]) -; CHECK-NEXT: store float [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 24), align 8 -; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 28), align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 28), align 4 -; CHECK-NEXT: [[TMP23:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP21]], float [[TMP22]]) -; CHECK-NEXT: store float [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 28), align 4 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr @input1_f32, align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @input2_f32, align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.minimumnum.v8f32(<8 x float> [[TMP0]], <8 x float> [[TMP1]]) +; CHECK-NEXT: store <8 x float> [[TMP2]], ptr @output_f32, align 16 ; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 32), align 16 ; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 32), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = tail call float @llvm.minimumnum.f32(float [[TMP24]], float [[TMP25]]) ; CHECK-NEXT: store float [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 32), align 16 ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmin32( +; ZVFHMIN-SAME: ) #[[ATTR0:[0-9]+]] { +; ZVFHMIN-NEXT: [[ENTRY:.*:]] +; ZVFHMIN-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr @input1_f32, align 16 +; ZVFHMIN-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @input2_f32, align 16 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.minimumnum.v8f32(<8 x float> [[TMP0]], <8 x float> [[TMP1]]) +; ZVFHMIN-NEXT: store <8 x float> [[TMP2]], ptr @output_f32, align 16 +; ZVFHMIN-NEXT: [[INPUT8_1:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 32), align 16 +; ZVFHMIN-NEXT: [[INPUT8_2:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 32), align 16 +; ZVFHMIN-NEXT: [[OUTPUT8:%.*]] = tail call float @llvm.minimumnum.f32(float [[INPUT8_1]], float [[INPUT8_2]]) +; ZVFHMIN-NEXT: store float [[OUTPUT8]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 32), align 16 +; ZVFHMIN-NEXT: ret void +; entry: %input0_0 = load float, ptr @input1_f32, align 16 %input0_1 = load float, ptr @input2_f32, align 16 @@ -99,44 +85,29 @@ define void @fmax32() { ; CHECK-LABEL: define void @fmax32( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr @input1_f32, align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @input2_f32, align 16 -; CHECK-NEXT: [[TMP2:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP0]], float [[TMP1]]) -; CHECK-NEXT: store float [[TMP2]], ptr @output_f32, align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 4), align 4 -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 4), align 4 -; CHECK-NEXT: [[TMP5:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP3]], float [[TMP4]]) -; CHECK-NEXT: store float [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 4), align 4 -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr 
@input1_f32, i64 8), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 8), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP6]], float [[TMP7]]) -; CHECK-NEXT: store float [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 8), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 12), align 4 -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 12), align 4 -; CHECK-NEXT: [[TMP11:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP9]], float [[TMP10]]) -; CHECK-NEXT: store float [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 12), align 4 -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 16), align 16 -; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 16), align 16 -; CHECK-NEXT: [[TMP14:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP12]], float [[TMP13]]) -; CHECK-NEXT: store float [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 16), align 16 -; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 20), align 4 -; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 20), align 4 -; CHECK-NEXT: [[TMP17:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP15]], float [[TMP16]]) -; CHECK-NEXT: store float [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 20), align 4 -; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 24), align 8 -; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 24), align 8 -; CHECK-NEXT: [[TMP20:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP18]], float [[TMP19]]) -; CHECK-NEXT: store float [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 24), align 8 -; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 28), align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 28), align 4 -; CHECK-NEXT: [[TMP23:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP21]], float [[TMP22]]) -; CHECK-NEXT: store float [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 28), align 4 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr @input1_f32, align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @input2_f32, align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.maximumnum.v8f32(<8 x float> [[TMP0]], <8 x float> [[TMP1]]) +; CHECK-NEXT: store <8 x float> [[TMP2]], ptr @output_f32, align 16 ; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 32), align 16 ; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 32), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = tail call float @llvm.maximumnum.f32(float [[TMP24]], float [[TMP25]]) ; CHECK-NEXT: store float [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 32), align 16 ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmax32( +; ZVFHMIN-SAME: ) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*:]] +; ZVFHMIN-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr @input1_f32, align 16 +; ZVFHMIN-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr 
@input2_f32, align 16 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.maximumnum.v8f32(<8 x float> [[TMP0]], <8 x float> [[TMP1]]) +; ZVFHMIN-NEXT: store <8 x float> [[TMP2]], ptr @output_f32, align 16 +; ZVFHMIN-NEXT: [[INPUT8_1:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input1_f32, i64 32), align 16 +; ZVFHMIN-NEXT: [[INPUT8_2:%.*]] = load float, ptr getelementptr inbounds nuw (i8, ptr @input2_f32, i64 32), align 16 +; ZVFHMIN-NEXT: [[OUTPUT8:%.*]] = tail call float @llvm.maximumnum.f32(float [[INPUT8_1]], float [[INPUT8_2]]) +; ZVFHMIN-NEXT: store float [[OUTPUT8]], ptr getelementptr inbounds nuw (i8, ptr @output_f32, i64 32), align 16 +; ZVFHMIN-NEXT: ret void +; entry: %input0_0 = load float, ptr @input1_f32, align 16 %input0_1 = load float, ptr @input2_f32, align 16 @@ -183,44 +154,37 @@ define void @fmin64() { ; CHECK-LABEL: define void @fmin64( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr @input1_f64, align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr @input2_f64, align 16 -; CHECK-NEXT: [[TMP2:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP0]], double [[TMP1]]) -; CHECK-NEXT: store double [[TMP2]], ptr @output_f64, align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 8), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 8), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP3]], double [[TMP4]]) -; CHECK-NEXT: store double [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 8), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 16), align 16 -; CHECK-NEXT: [[TMP7:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 16), align 16 -; CHECK-NEXT: [[TMP8:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP6]], double [[TMP7]]) -; CHECK-NEXT: store double [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 16), align 16 -; CHECK-NEXT: [[TMP9:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 24), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 24), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP9]], double [[TMP10]]) -; CHECK-NEXT: store double [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 24), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 32), align 16 -; CHECK-NEXT: [[TMP13:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 32), align 16 -; CHECK-NEXT: [[TMP14:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP12]], double [[TMP13]]) -; CHECK-NEXT: store double [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 32), align 16 -; CHECK-NEXT: [[TMP15:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 40), align 8 -; CHECK-NEXT: [[TMP16:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 40), align 8 -; CHECK-NEXT: [[TMP17:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP15]], double [[TMP16]]) -; CHECK-NEXT: store double [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 40), align 8 -; CHECK-NEXT: [[TMP18:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 
48), align 16 -; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 48), align 16 -; CHECK-NEXT: [[TMP20:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP18]], double [[TMP19]]) -; CHECK-NEXT: store double [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 48), align 16 -; CHECK-NEXT: [[TMP21:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 56), align 8 -; CHECK-NEXT: [[TMP22:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 56), align 8 -; CHECK-NEXT: [[TMP23:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP21]], double [[TMP22]]) -; CHECK-NEXT: store double [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 56), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr @input1_f64, align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @input2_f64, align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> [[TMP0]], <4 x double> [[TMP1]]) +; CHECK-NEXT: store <4 x double> [[TMP2]], ptr @output_f64, align 16 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 32), align 16 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 32), align 16 +; CHECK-NEXT: [[TMP5:%.*]] = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> [[TMP3]], <4 x double> [[TMP4]]) +; CHECK-NEXT: store <4 x double> [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 32), align 16 ; CHECK-NEXT: [[TMP24:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 64), align 16 ; CHECK-NEXT: [[TMP25:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 64), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = tail call double @llvm.minimumnum.f64(double [[TMP24]], double [[TMP25]]) ; CHECK-NEXT: store double [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 64), align 16 ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmin64( +; ZVFHMIN-SAME: ) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*:]] +; ZVFHMIN-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr @input1_f64, align 16 +; ZVFHMIN-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @input2_f64, align 16 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> [[TMP0]], <4 x double> [[TMP1]]) +; ZVFHMIN-NEXT: store <4 x double> [[TMP2]], ptr @output_f64, align 16 +; ZVFHMIN-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 32), align 16 +; ZVFHMIN-NEXT: [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 32), align 16 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> [[TMP3]], <4 x double> [[TMP4]]) +; ZVFHMIN-NEXT: store <4 x double> [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 32), align 16 +; ZVFHMIN-NEXT: [[INPUT8_1:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 64), align 16 +; ZVFHMIN-NEXT: [[INPUT8_2:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 64), align 16 +; ZVFHMIN-NEXT: [[OUTPUT8:%.*]] = tail call double @llvm.minimumnum.f64(double [[INPUT8_1]], double [[INPUT8_2]]) +; ZVFHMIN-NEXT: store double [[OUTPUT8]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 64), align 16 +; ZVFHMIN-NEXT: ret void +; entry: %input0_0 = load double, ptr @input1_f64, align 16 %input0_1 
= load double, ptr @input2_f64, align 16 @@ -267,44 +231,37 @@ define void @fmax64() { ; CHECK-LABEL: define void @fmax64( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr @input1_f64, align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr @input2_f64, align 16 -; CHECK-NEXT: [[TMP2:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP0]], double [[TMP1]]) -; CHECK-NEXT: store double [[TMP2]], ptr @output_f64, align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 8), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 8), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP3]], double [[TMP4]]) -; CHECK-NEXT: store double [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 8), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 16), align 16 -; CHECK-NEXT: [[TMP7:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 16), align 16 -; CHECK-NEXT: [[TMP8:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP6]], double [[TMP7]]) -; CHECK-NEXT: store double [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 16), align 16 -; CHECK-NEXT: [[TMP9:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 24), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 24), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP9]], double [[TMP10]]) -; CHECK-NEXT: store double [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 24), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 32), align 16 -; CHECK-NEXT: [[TMP13:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 32), align 16 -; CHECK-NEXT: [[TMP14:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP12]], double [[TMP13]]) -; CHECK-NEXT: store double [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 32), align 16 -; CHECK-NEXT: [[TMP15:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 40), align 8 -; CHECK-NEXT: [[TMP16:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 40), align 8 -; CHECK-NEXT: [[TMP17:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP15]], double [[TMP16]]) -; CHECK-NEXT: store double [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 40), align 8 -; CHECK-NEXT: [[TMP18:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 48), align 16 -; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 48), align 16 -; CHECK-NEXT: [[TMP20:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP18]], double [[TMP19]]) -; CHECK-NEXT: store double [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 48), align 16 -; CHECK-NEXT: [[TMP21:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 56), align 8 -; CHECK-NEXT: [[TMP22:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 56), align 8 -; CHECK-NEXT: [[TMP23:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP21]], double [[TMP22]]) -; CHECK-NEXT: store double [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, 
i64 56), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr @input1_f64, align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @input2_f64, align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> [[TMP0]], <4 x double> [[TMP1]]) +; CHECK-NEXT: store <4 x double> [[TMP2]], ptr @output_f64, align 16 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 32), align 16 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 32), align 16 +; CHECK-NEXT: [[TMP5:%.*]] = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> [[TMP3]], <4 x double> [[TMP4]]) +; CHECK-NEXT: store <4 x double> [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 32), align 16 ; CHECK-NEXT: [[TMP24:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 64), align 16 ; CHECK-NEXT: [[TMP25:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 64), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = tail call double @llvm.maximumnum.f64(double [[TMP24]], double [[TMP25]]) ; CHECK-NEXT: store double [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 64), align 16 ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmax64( +; ZVFHMIN-SAME: ) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*:]] +; ZVFHMIN-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr @input1_f64, align 16 +; ZVFHMIN-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @input2_f64, align 16 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> [[TMP0]], <4 x double> [[TMP1]]) +; ZVFHMIN-NEXT: store <4 x double> [[TMP2]], ptr @output_f64, align 16 +; ZVFHMIN-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 32), align 16 +; ZVFHMIN-NEXT: [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 32), align 16 +; ZVFHMIN-NEXT: [[TMP5:%.*]] = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> [[TMP3]], <4 x double> [[TMP4]]) +; ZVFHMIN-NEXT: store <4 x double> [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 32), align 16 +; ZVFHMIN-NEXT: [[INPUT8_1:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input1_f64, i64 64), align 16 +; ZVFHMIN-NEXT: [[INPUT8_2:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @input2_f64, i64 64), align 16 +; ZVFHMIN-NEXT: [[OUTPUT8:%.*]] = tail call double @llvm.maximumnum.f64(double [[INPUT8_1]], double [[INPUT8_2]]) +; ZVFHMIN-NEXT: store double [[OUTPUT8]], ptr getelementptr inbounds nuw (i8, ptr @output_f64, i64 64), align 16 +; ZVFHMIN-NEXT: ret void +; entry: %input0_0 = load double, ptr @input1_f64, align 16 %input0_1 = load double, ptr @input2_f64, align 16 @@ -351,44 +308,29 @@ define void @fmin16() { ; CHECK-LABEL: define void @fmin16( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load half, ptr @input1_f16, align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load half, ptr @input2_f16, align 16 -; CHECK-NEXT: [[TMP2:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP0]], half [[TMP1]]) -; CHECK-NEXT: store half [[TMP2]], ptr @output_f16, align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 2), align 2 -; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 2), align 2 -; CHECK-NEXT: [[TMP5:%.*]] = tail call half @llvm.minimumnum.f16(half 
[[TMP3]], half [[TMP4]]) -; CHECK-NEXT: store half [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 2), align 2 -; CHECK-NEXT: [[TMP6:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 4), align 4 -; CHECK-NEXT: [[TMP7:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 4), align 4 -; CHECK-NEXT: [[TMP8:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP6]], half [[TMP7]]) -; CHECK-NEXT: store half [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 4), align 4 -; CHECK-NEXT: [[TMP9:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 6), align 2 -; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 6), align 2 -; CHECK-NEXT: [[TMP11:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP9]], half [[TMP10]]) -; CHECK-NEXT: store half [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 6), align 2 -; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 8), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 8), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP12]], half [[TMP13]]) -; CHECK-NEXT: store half [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 8), align 8 -; CHECK-NEXT: [[TMP15:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 10), align 2 -; CHECK-NEXT: [[TMP16:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 10), align 2 -; CHECK-NEXT: [[TMP17:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP15]], half [[TMP16]]) -; CHECK-NEXT: store half [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 10), align 2 -; CHECK-NEXT: [[TMP18:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 12), align 4 -; CHECK-NEXT: [[TMP19:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 12), align 4 -; CHECK-NEXT: [[TMP20:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP18]], half [[TMP19]]) -; CHECK-NEXT: store half [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 12), align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 14), align 2 -; CHECK-NEXT: [[TMP22:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 14), align 2 -; CHECK-NEXT: [[TMP23:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP21]], half [[TMP22]]) -; CHECK-NEXT: store half [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 14), align 2 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr @input1_f16, align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr @input2_f16, align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> [[TMP0]], <8 x half> [[TMP1]]) +; CHECK-NEXT: store <8 x half> [[TMP2]], ptr @output_f16, align 16 ; CHECK-NEXT: [[TMP24:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 16), align 16 ; CHECK-NEXT: [[TMP25:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 16), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = tail call half @llvm.minimumnum.f16(half [[TMP24]], half [[TMP25]]) ; CHECK-NEXT: store half [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 16), align 16 ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmin16( +; ZVFHMIN-SAME: ) #[[ATTR0]] { +; 
ZVFHMIN-NEXT: [[ENTRY:.*:]] +; ZVFHMIN-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr @input1_f16, align 16 +; ZVFHMIN-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr @input2_f16, align 16 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> [[TMP0]], <8 x half> [[TMP1]]) +; ZVFHMIN-NEXT: store <8 x half> [[TMP2]], ptr @output_f16, align 16 +; ZVFHMIN-NEXT: [[INPUT8_1:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 16), align 16 +; ZVFHMIN-NEXT: [[INPUT8_2:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 16), align 16 +; ZVFHMIN-NEXT: [[OUTPUT8:%.*]] = tail call half @llvm.minimumnum.f16(half [[INPUT8_1]], half [[INPUT8_2]]) +; ZVFHMIN-NEXT: store half [[OUTPUT8]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 16), align 16 +; ZVFHMIN-NEXT: ret void +; entry: %input0_0 = load half, ptr @input1_f16, align 16 %input0_1 = load half, ptr @input2_f16, align 16 @@ -435,44 +377,29 @@ define void @fmax16() { ; CHECK-LABEL: define void @fmax16( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load half, ptr @input1_f16, align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load half, ptr @input2_f16, align 16 -; CHECK-NEXT: [[TMP2:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP0]], half [[TMP1]]) -; CHECK-NEXT: store half [[TMP2]], ptr @output_f16, align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 2), align 2 -; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 2), align 2 -; CHECK-NEXT: [[TMP5:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP3]], half [[TMP4]]) -; CHECK-NEXT: store half [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 2), align 2 -; CHECK-NEXT: [[TMP6:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 4), align 4 -; CHECK-NEXT: [[TMP7:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 4), align 4 -; CHECK-NEXT: [[TMP8:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP6]], half [[TMP7]]) -; CHECK-NEXT: store half [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 4), align 4 -; CHECK-NEXT: [[TMP9:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 6), align 2 -; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 6), align 2 -; CHECK-NEXT: [[TMP11:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP9]], half [[TMP10]]) -; CHECK-NEXT: store half [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 6), align 2 -; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 8), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 8), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP12]], half [[TMP13]]) -; CHECK-NEXT: store half [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 8), align 8 -; CHECK-NEXT: [[TMP15:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 10), align 2 -; CHECK-NEXT: [[TMP16:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 10), align 2 -; CHECK-NEXT: [[TMP17:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP15]], half [[TMP16]]) -; CHECK-NEXT: store half [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 10), align 2 -; CHECK-NEXT: [[TMP18:%.*]] = load half, ptr 
getelementptr inbounds nuw (i8, ptr @input1_f16, i64 12), align 4 -; CHECK-NEXT: [[TMP19:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 12), align 4 -; CHECK-NEXT: [[TMP20:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP18]], half [[TMP19]]) -; CHECK-NEXT: store half [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 12), align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 14), align 2 -; CHECK-NEXT: [[TMP22:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 14), align 2 -; CHECK-NEXT: [[TMP23:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP21]], half [[TMP22]]) -; CHECK-NEXT: store half [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 14), align 2 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr @input1_f16, align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr @input2_f16, align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> [[TMP0]], <8 x half> [[TMP1]]) +; CHECK-NEXT: store <8 x half> [[TMP2]], ptr @output_f16, align 16 ; CHECK-NEXT: [[TMP24:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 16), align 16 ; CHECK-NEXT: [[TMP25:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 16), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = tail call half @llvm.maximumnum.f16(half [[TMP24]], half [[TMP25]]) ; CHECK-NEXT: store half [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 16), align 16 ; CHECK-NEXT: ret void ; +; ZVFHMIN-LABEL: define void @fmax16( +; ZVFHMIN-SAME: ) #[[ATTR0]] { +; ZVFHMIN-NEXT: [[ENTRY:.*:]] +; ZVFHMIN-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr @input1_f16, align 16 +; ZVFHMIN-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr @input2_f16, align 16 +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> [[TMP0]], <8 x half> [[TMP1]]) +; ZVFHMIN-NEXT: store <8 x half> [[TMP2]], ptr @output_f16, align 16 +; ZVFHMIN-NEXT: [[INPUT8_1:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input1_f16, i64 16), align 16 +; ZVFHMIN-NEXT: [[INPUT8_2:%.*]] = load half, ptr getelementptr inbounds nuw (i8, ptr @input2_f16, i64 16), align 16 +; ZVFHMIN-NEXT: [[OUTPUT8:%.*]] = tail call half @llvm.maximumnum.f16(half [[INPUT8_1]], half [[INPUT8_2]]) +; ZVFHMIN-NEXT: store half [[OUTPUT8]], ptr getelementptr inbounds nuw (i8, ptr @output_f16, i64 16), align 16 +; ZVFHMIN-NEXT: ret void +; entry: %input0_0 = load half, ptr @input1_f16, align 16 %input0_1 = load half, ptr @input2_f16, align 16