76 changes: 76 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -401,6 +401,8 @@ namespace {
SDValue PromoteExtend(SDValue Op);
bool PromoteLoad(SDValue Op);

SDValue foldShiftToAvg(SDNode *N);

SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
SDValue RHS, SDValue True, SDValue False,
ISD::CondCode CC);
@@ -5351,6 +5353,27 @@ SDValue DAGCombiner::visitAVG(SDNode *N) {
DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getAllOnesConstant(DL, VT)));
}

// Fold avgfloor((add nw x,y), 1) -> avgceil(x,y)
// Fold avgfloor((add nw x,1), y) -> avgceil(x,y)
if ((Opcode == ISD::AVGFLOORU && hasOperation(ISD::AVGCEILU, VT)) ||
(Opcode == ISD::AVGFLOORS && hasOperation(ISD::AVGCEILS, VT))) {
SDValue Add;
if (sd_match(N,
m_c_BinOp(Opcode,
m_AllOf(m_Value(Add), m_Add(m_Value(X), m_Value(Y))),
m_One())) ||
sd_match(N, m_c_BinOp(Opcode,
m_AllOf(m_Value(Add), m_Add(m_Value(X), m_One())),
m_Value(Y)))) {

if (IsSigned && Add->getFlags().hasNoSignedWrap())
return DAG.getNode(ISD::AVGCEILS, DL, VT, X, Y);

if (!IsSigned && Add->getFlags().hasNoUnsignedWrap())
return DAG.getNode(ISD::AVGCEILU, DL, VT, X, Y);
}
}

return SDValue();
}
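As a standalone check of the identity this new fold relies on: avgfloor(a, 1) computes (a + 1) >> 1, so with a = x + y it equals avgceil(x, y) exactly when the inner add does not wrap, which is what the flag checks above guard. A minimal scalar sketch on u8; `avgflooru`/`avgceilu` below are hand-written references for the ISD nodes, not LLVM APIs:

```cpp
#include <cassert>
#include <cstdint>

// Hand-written scalar references for ISD::AVGFLOORU / ISD::AVGCEILU on u8;
// both are defined as if computed with an extra bit of precision.
static uint8_t avgflooru(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((unsigned(a) + unsigned(b)) >> 1);
}
static uint8_t avgceilu(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((unsigned(a) + unsigned(b) + 1) >> 1);
}

int main() {
  for (unsigned x = 0; x <= 255; ++x)
    for (unsigned y = 0; y <= 255; ++y)
      if (x + y <= 255) { // the inner add is 'nuw' in i8
        uint8_t add = static_cast<uint8_t>(x + y);
        assert(avgflooru(add, 1) == avgceilu(x, y));
      }
  return 0;
}
```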

@@ -10629,6 +10652,9 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (SDValue NarrowLoad = reduceLoadWidth(N))
return NarrowLoad;

if (SDValue AVG = foldShiftToAvg(N))
return AVG;

return SDValue();
}

@@ -10883,6 +10909,9 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (SDValue MULH = combineShiftToMULH(N, DL, DAG, TLI))
return MULH;

if (SDValue AVG = foldShiftToAvg(N))
return AVG;

return SDValue();
}

@@ -11396,6 +11425,53 @@ static SDValue combineMinNumMaxNumImpl(const SDLoc &DL, EVT VT, SDValue LHS,
}
}

SDValue DAGCombiner::foldShiftToAvg(SDNode *N) {
const unsigned Opcode = N->getOpcode();

// Convert (sr[al] (add n[su]w x, y), 1) -> (avgfloor[su] x, y)
if (Opcode != ISD::SRA && Opcode != ISD::SRL)
return SDValue();

unsigned FloorISD = 0;
auto VT = N->getValueType(0);
bool IsUnsigned = false;

// Decide whether signed or unsigned.
switch (Opcode) {
case ISD::SRA:
if (!hasOperation(ISD::AVGFLOORS, VT))
return SDValue();
FloorISD = ISD::AVGFLOORS;
break;
case ISD::SRL:
IsUnsigned = true;
if (!hasOperation(ISD::AVGFLOORU, VT))
return SDValue();
FloorISD = ISD::AVGFLOORU;
break;
default:
return SDValue();
}

// Captured values.
SDValue A, B, Add;

// Match floor average as it is common to both floor/ceil avgs.
if (!sd_match(N, m_BinOp(Opcode,
m_AllOf(m_Value(Add), m_Add(m_Value(A), m_Value(B))),
m_One())))
return SDValue();

// Can't optimize adds that may wrap.
if (IsUnsigned && !Add->getFlags().hasNoUnsignedWrap())
return SDValue();

if (!IsUnsigned && !Add->getFlags().hasNoSignedWrap())
return SDValue();

return DAG.getNode(FloorISD, SDLoc(N), N->getValueType(0), {A, B});
}
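A scalar model of why the no-wrap guard is load-bearing here: AVGFLOOR is defined as if the add kept an extra bit of precision, while the matched DAG performs the add in the narrow type first. A minimal sketch for the signed (SRA) case on i8, using hand-written references rather than LLVM APIs; the unsigned (SRL) case is analogous with nuw:

```cpp
#include <cassert>
#include <cstdint>

// Reference for ISD::AVGFLOORS on i8: add with extra precision, then shift.
static int8_t avgfloors(int8_t a, int8_t b) {
  return static_cast<int8_t>((int(a) + int(b)) >> 1);
}

// The matched DAG shape: the add happens in i8 and may wrap
// (narrowing is modular since C++20, and in practice everywhere).
static int8_t narrow_add_then_sra(int8_t a, int8_t b) {
  return static_cast<int8_t>(static_cast<int8_t>(a + b) >> 1);
}

int main() {
  for (int x = -128; x <= 127; ++x)
    for (int y = -128; y <= 127; ++y)
      if (x + y >= -128 && x + y <= 127) // i.e. the i8 add is 'nsw'
        assert(narrow_add_then_sra(int8_t(x), int8_t(y)) ==
               avgfloors(int8_t(x), int8_t(y)));
  // Without nsw the rewrite would be wrong: 100 + 100 wraps to -56 in i8,
  // so the shift yields -28, while avgfloors(100, 100) is 100.
  assert(narrow_add_then_sra(100, 100) == -28);
  assert(avgfloors(100, 100) == 100);
  return 0;
}
```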

/// Generate Min/Max node
SDValue DAGCombiner::combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
SDValue RHS, SDValue True,
2 changes: 2 additions & 0 deletions llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -7951,6 +7951,8 @@ static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) {
case ISD::MUL:
case ISD::SADDSAT:
case ISD::UADDSAT:
case ISD::AVGFLOORS:
case ISD::AVGFLOORU:
return true;
case ISD::SUB:
case ISD::SSUBSAT:
85 changes: 9 additions & 76 deletions llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -2222,64 +2222,6 @@ defm MVE_VRHADDu8 : MVE_VRHADD<MVE_v16u8, avgceilu>;
defm MVE_VRHADDu16 : MVE_VRHADD<MVE_v8u16, avgceilu>;
defm MVE_VRHADDu32 : MVE_VRHADD<MVE_v4u32, avgceilu>;

// Rounding Halving Add performs the arithmetic operation with an extra bit of
// precision, before performing the shift, to avoid clipping errors. We're not
// modelling that here with these patterns, but we're using no wrap forms of
// add to ensure that the extra bit of information is not needed for the
// arithmetic or the rounding.
let Predicates = [HasMVEInt] in {
def : Pat<(v16i8 (ARMvshrsImm (addnsw (addnsw (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)),
(v16i8 (ARMvmovImm (i32 3585)))),
(i32 1))),
(MVE_VRHADDs8 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v8i16 (ARMvshrsImm (addnsw (addnsw (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)),
(v8i16 (ARMvmovImm (i32 2049)))),
(i32 1))),
(MVE_VRHADDs16 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v4i32 (ARMvshrsImm (addnsw (addnsw (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)),
(v4i32 (ARMvmovImm (i32 1)))),
(i32 1))),
(MVE_VRHADDs32 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v16i8 (ARMvshruImm (addnuw (addnuw (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)),
(v16i8 (ARMvmovImm (i32 3585)))),
(i32 1))),
(MVE_VRHADDu8 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v8i16 (ARMvshruImm (addnuw (addnuw (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)),
(v8i16 (ARMvmovImm (i32 2049)))),
(i32 1))),
(MVE_VRHADDu16 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v4i32 (ARMvshruImm (addnuw (addnuw (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)),
(v4i32 (ARMvmovImm (i32 1)))),
(i32 1))),
(MVE_VRHADDu32 MQPR:$Qm, MQPR:$Qn)>;

def : Pat<(v16i8 (ARMvshrsImm (addnsw (addnsw (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)),
(v16i8 (ARMvdup (i32 1)))),
(i32 1))),
(MVE_VRHADDs8 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v8i16 (ARMvshrsImm (addnsw (addnsw (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)),
(v8i16 (ARMvdup (i32 1)))),
(i32 1))),
(MVE_VRHADDs16 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v4i32 (ARMvshrsImm (addnsw (addnsw (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)),
(v4i32 (ARMvdup (i32 1)))),
(i32 1))),
(MVE_VRHADDs32 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v16i8 (ARMvshruImm (addnuw (addnuw (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)),
(v16i8 (ARMvdup (i32 1)))),
(i32 1))),
(MVE_VRHADDu8 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v8i16 (ARMvshruImm (addnuw (addnuw (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)),
(v8i16 (ARMvdup (i32 1)))),
(i32 1))),
(MVE_VRHADDu16 MQPR:$Qm, MQPR:$Qn)>;
def : Pat<(v4i32 (ARMvshruImm (addnuw (addnuw (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)),
(v4i32 (ARMvdup (i32 1)))),
(i32 1))),
(MVE_VRHADDu32 MQPR:$Qm, MQPR:$Qn)>;
}
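The clipping that the comment above refers to is easy to reproduce in scalar code. A minimal sketch, u8 only, with a hand-written reference for what VRHADD.U8 computes per lane (not an LLVM API); it checks that the narrow add-add-shift chain these now-deleted patterns matched agrees with the instruction exactly when both adds carry no-wrap flags:

```cpp
#include <cassert>
#include <cstdint>

// What VRHADD.U8 computes per lane: the sum keeps its 9th bit before the
// rounding shift.
static uint8_t vrhadd_u8(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((unsigned(a) + unsigned(b) + 1) >> 1);
}

int main() {
  for (unsigned x = 0; x <= 255; ++x)
    for (unsigned y = 0; y <= 255; ++y)
      if (x + y + 1 <= 255) { // both i8 adds are 'nuw'
        uint8_t narrow = static_cast<uint8_t>(x + y + 1) >> 1;
        assert(narrow == vrhadd_u8(x, y));
      }
  // When the sum needs 9 bits the narrow chain clips: for x = y = 255 it
  // yields ((255 + 255 + 1) mod 256) >> 1 = 127, but VRHADD.U8 gives 255.
  assert((static_cast<uint8_t>(255 + 255 + 1) >> 1) == 127);
  assert(vrhadd_u8(255, 255) == 255);
  return 0;
}
```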


class MVE_VHADDSUB<string iname, string suffix, bit U, bit subtract,
bits<2> size, list<dag> pattern=[]>
: MVE_int<iname, suffix, size, pattern> {
@@ -2303,8 +2245,7 @@ class MVE_VHSUB_<string suffix, bit U, bits<2> size,
: MVE_VHADDSUB<"vhsub", suffix, U, 0b1, size, pattern>;

multiclass MVE_VHADD_m<MVEVectorVTInfo VTI, SDNode Op,
SDPatternOperator unpred_op, Intrinsic PredInt, PatFrag add_op,
SDNode shift_op> {
SDPatternOperator unpred_op, Intrinsic PredInt> {
def "" : MVE_VHADD_<VTI.Suffix, VTI.Unsigned, VTI.Size>;
defvar Inst = !cast<Instruction>(NAME);
defm : MVE_TwoOpPattern<VTI, Op, PredInt, (? (i32 VTI.Unsigned)), !cast<Instruction>(NAME)>;
@@ -2313,26 +2254,18 @@ multiclass MVE_VHADD_m<MVEVectorVTInfo VTI, SDNode Op,
// Unpredicated add-and-divide-by-two
def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), (i32 VTI.Unsigned))),
(VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;

def : Pat<(VTI.Vec (shift_op (add_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)), (i32 1))),
(Inst MQPR:$Qm, MQPR:$Qn)>;
}
}

multiclass MVE_VHADD<MVEVectorVTInfo VTI, SDNode Op, PatFrag add_op, SDNode shift_op>
: MVE_VHADD_m<VTI, Op, int_arm_mve_vhadd, int_arm_mve_hadd_predicated, add_op,
shift_op>;
multiclass MVE_VHADD<MVEVectorVTInfo VTI, SDNode Op>
: MVE_VHADD_m<VTI, Op, int_arm_mve_vhadd, int_arm_mve_hadd_predicated>;

// Halving add/sub perform the arithmetic operation with an extra bit of
// precision, before performing the shift, to avoid clipping errors. We're not
// modelling that here with these patterns, but we're using no wrap forms of
// add/sub to ensure that the extra bit of information is not needed.
defm MVE_VHADDs8 : MVE_VHADD<MVE_v16s8, avgfloors, addnsw, ARMvshrsImm>;
defm MVE_VHADDs16 : MVE_VHADD<MVE_v8s16, avgfloors, addnsw, ARMvshrsImm>;
defm MVE_VHADDs32 : MVE_VHADD<MVE_v4s32, avgfloors, addnsw, ARMvshrsImm>;
defm MVE_VHADDu8 : MVE_VHADD<MVE_v16u8, avgflooru, addnuw, ARMvshruImm>;
defm MVE_VHADDu16 : MVE_VHADD<MVE_v8u16, avgflooru, addnuw, ARMvshruImm>;
defm MVE_VHADDu32 : MVE_VHADD<MVE_v4u32, avgflooru, addnuw, ARMvshruImm>;
defm MVE_VHADDs8 : MVE_VHADD<MVE_v16s8, avgfloors>;
defm MVE_VHADDs16 : MVE_VHADD<MVE_v8s16, avgfloors>;
defm MVE_VHADDs32 : MVE_VHADD<MVE_v4s32, avgfloors>;
defm MVE_VHADDu8 : MVE_VHADD<MVE_v16u8, avgflooru>;
defm MVE_VHADDu16 : MVE_VHADD<MVE_v8u16, avgflooru>;
defm MVE_VHADDu32 : MVE_VHADD<MVE_v4u32, avgflooru>;

multiclass MVE_VHSUB_m<MVEVectorVTInfo VTI,
SDPatternOperator unpred_op, Intrinsic pred_int, PatFrag sub_op,
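For context, this is roughly the source-level shape that ends up exercising these halving-add patterns once the vectoriser has narrowed the arithmetic. The function name is illustrative, and whether this exact loop selects VHADD on MVE depends on target features, cost model, and compiler version, so treat it as a sketch:

```cpp
#include <cstddef>
#include <cstdint>

// Halving add over bytes. Widening to 'unsigned' proves the per-byte sum
// fits in 9 bits, so after narrowing the IR can carry the no-wrap flags
// that the generic combine (and previously the MVE patterns) require.
void halving_add(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n) {
  for (size_t i = 0; i != n; ++i)
    dst[i] = static_cast<uint8_t>((unsigned(a[i]) + unsigned(b[i])) >> 1);
}
```

With this patch the add-then-shift is rewritten to the AVG nodes in the generic DAG combiner, so any target that marks AVGFLOOR/AVGCEIL as legal can benefit, not just MVE.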