95 changes: 0 additions & 95 deletions llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -677,95 +677,6 @@ bool RISCVDAGToDAGISel::trySignedBitfieldExtract(SDNode *Node) {
return false;
}

bool RISCVDAGToDAGISel::trySignedBitfieldInsertInMask(SDNode *Node) {
// Supported only in Xqcibm for now.
if (!Subtarget->hasVendorXqcibm())
return false;

using namespace SDPatternMatch;

SDValue X;
APInt MaskImm;
if (!sd_match(Node, m_Or(m_OneUse(m_Value(X)), m_ConstInt(MaskImm))))
return false;

unsigned ShAmt, Width;
if (!MaskImm.isShiftedMask(ShAmt, Width) || MaskImm.isSignedIntN(12))
return false;

// If Zbs is enabled and it is a single bit set we can use BSETI which
// can be compressed to C_BSETI when Xqcibm in enabled.
if (Width == 1 && Subtarget->hasStdExtZbs())
return false;

// If C1 is a shifted mask (but can't be formed as an ORI),
// use a bitfield insert of -1.
// Transform (or x, C1)
// -> (qc.insbi x, -1, width, shift)
SDLoc DL(Node);
MVT VT = Node->getSimpleValueType(0);

SDValue Ops[] = {X, CurDAG->getSignedTargetConstant(-1, DL, VT),
CurDAG->getTargetConstant(Width, DL, VT),
CurDAG->getTargetConstant(ShAmt, DL, VT)};
SDNode *BitIns = CurDAG->getMachineNode(RISCV::QC_INSBI, DL, VT, Ops);
ReplaceNode(Node, BitIns);
return true;
}

// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value
// being inserted only sets known zero bits.
bool RISCVDAGToDAGISel::tryBitfieldInsertOpFromOrAndImm(SDNode *Node) {
// Supported only in Xqcibm for now.
if (!Subtarget->hasVendorXqcibm())
return false;

using namespace SDPatternMatch;

SDValue And;
APInt MaskImm, OrImm;
if (!sd_match(Node, m_Or(m_OneUse(m_And(m_Value(And), m_ConstInt(MaskImm))),
m_ConstInt(OrImm))))
return false;

// Compute the Known Zero for the AND as this allows us to catch more general
// cases than just looking for AND with imm.
KnownBits Known = CurDAG->computeKnownBits(Node->getOperand(0));

// The bits being inserted must only set those bits that are known to be zero.
if (!OrImm.isSubsetOf(Known.Zero)) {
// FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
// currently handle this case.
return false;
}

unsigned ShAmt, Width;
// The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
if (!Known.Zero.isShiftedMask(ShAmt, Width))
return false;

// QC_INSB(I) dst, src, #width, #shamt.
SDLoc DL(Node);
MVT VT = Node->getSimpleValueType(0);
SDValue ImmNode;
auto Opc = RISCV::QC_INSB;

int32_t LIImm = OrImm.getSExtValue() >> ShAmt;

if (isInt<5>(LIImm)) {
Opc = RISCV::QC_INSBI;
ImmNode = CurDAG->getSignedTargetConstant(LIImm, DL, MVT::i32);
} else {
ImmNode = selectImm(CurDAG, DL, MVT::i32, LIImm, *Subtarget);
}

SDValue Ops[] = {And, ImmNode, CurDAG->getTargetConstant(Width, DL, VT),
CurDAG->getTargetConstant(ShAmt, DL, VT)};
SDNode *BitIns = CurDAG->getMachineNode(Opc, DL, VT, Ops);
ReplaceNode(Node, BitIns);
return true;
}

bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
// Only supported with XAndesPerf at the moment.
if (!Subtarget->hasVendorXAndesPerf())
@@ -1384,12 +1295,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
case ISD::OR: {
if (trySignedBitfieldInsertInMask(Node))
return;

if (tryBitfieldInsertOpFromOrAndImm(Node))
return;

if (tryShrinkShlLogicImm(Node))
return;

2 changes: 0 additions & 2 deletions llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -74,8 +74,6 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
bool tryShrinkShlLogicImm(SDNode *Node);
bool trySignedBitfieldExtract(SDNode *Node);
bool trySignedBitfieldInsertInSign(SDNode *Node);
bool trySignedBitfieldInsertInMask(SDNode *Node);
bool tryBitfieldInsertOpFromOrAndImm(SDNode *Node);
bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT,
SDValue X, unsigned Msb, unsigned Lsb);
bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT,
88 changes: 86 additions & 2 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16182,7 +16182,6 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
return SDValue();

using namespace SDPatternMatch;

SDValue Base, Inserted;
APInt CMask;
if (!sd_match(N, m_Xor(m_Value(Base),
@@ -16193,7 +16192,6 @@

if (N->getValueType(0) != MVT::i32)
return SDValue();

unsigned Width, ShAmt;
if (!CMask.isShiftedMask(ShAmt, Width))
return SDValue();
@@ -16214,10 +16212,96 @@
return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
}

static SDValue combineOrToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
if (!Subtarget.hasVendorXqcibm())
return SDValue();

using namespace SDPatternMatch;

SDValue X;
APInt MaskImm;
if (!sd_match(N, m_Or(m_OneUse(m_Value(X)), m_ConstInt(MaskImm))))
return SDValue();

unsigned ShAmt, Width;
if (!MaskImm.isShiftedMask(ShAmt, Width) || MaskImm.isSignedIntN(12))
return SDValue();

if (N->getValueType(0) != MVT::i32)
return SDValue();

// If Zbs is enabled and the mask is a single set bit, we can use BSETI, which
// can be compressed to C_BSETI when Xqcibm is enabled.
if (Width == 1 && Subtarget.hasStdExtZbs())
return SDValue();

// If C1 is a shifted mask (but can't be formed as an ORI),
// use a bitfield insert of -1.
// Transform (or x, C1)
// -> (qc.insbi x, -1, width, shift)
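// Illustrative example (not from the original patch): with Xqcibm, for
// (or x, 0xff00) the constant 0xff00 is an 8-bit mask shifted left by 8 and
// does not fit a signed 12-bit ORI immediate, so the node becomes
// (qc.insbi x, -1, 8, 8).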
SDLoc DL(N);

SDValue Ops[] = {X, DAG.getSignedConstant(-1, DL, MVT::i32),
DAG.getConstant(Width, DL, MVT::i32),
DAG.getConstant(ShAmt, DL, MVT::i32)};
return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
}

// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value
// being inserted only sets known zero bits.
static SDValue combineOrAndToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
// Supported only in Xqcibm for now.
if (!Subtarget.hasVendorXqcibm())
return SDValue();

using namespace SDPatternMatch;

SDValue Inserted;
APInt MaskImm, OrImm;
if (!sd_match(
N, m_SpecificVT(MVT::i32, m_Or(m_OneUse(m_And(m_Value(Inserted),
m_ConstInt(MaskImm))),
m_ConstInt(OrImm)))))
return SDValue();

// Compute the Known Zero for the AND as this allows us to catch more general
// cases than just looking for AND with imm.
KnownBits Known = DAG.computeKnownBits(N->getOperand(0));

// The bits being inserted must only set those bits that are known to be
// zero.
if (!OrImm.isSubsetOf(Known.Zero)) {
// FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
// currently handle this case.
return SDValue();
}

unsigned ShAmt, Width;
// The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
if (!Known.Zero.isShiftedMask(ShAmt, Width))
return SDValue();

// QC_INSB(I) dst, src, #width, #shamt.
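// Illustrative example (not from the original patch): for
// (or (and x, 0xffff00ff), 0x5500), Known.Zero covers bits 8..15 (0xff00),
// 0x5500 is a subset of those bits, so ShAmt = 8, Width = 8 and the inserted
// value is 0x5500 >> 8 = 0x55, giving (qc.insb x, 0x55, 8, 8).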
SDLoc DL(N);

SDValue ImmNode =
DAG.getSignedConstant(OrImm.getSExtValue() >> ShAmt, DL, MVT::i32);

SDValue Ops[] = {Inserted, ImmNode, DAG.getConstant(Width, DL, MVT::i32),
DAG.getConstant(ShAmt, DL, MVT::i32)};
return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
}

static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const RISCVSubtarget &Subtarget) {
SelectionDAG &DAG = DCI.DAG;

if (SDValue V = combineOrToBitfieldInsert(N, DAG, Subtarget))
return V;
if (SDValue V = combineOrAndToBitfieldInsert(N, DAG, Subtarget))
return V;
if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
return V;
if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))