[RISCV] Add changes to have better coverage for qc.insb and qc.insbi #154135
Conversation
@llvm/pr-subscribers-backend-risc-v

Author: quic_hchandel (hchandel)

Changes: Co-authored by @lenary

Full diff: https://github.com/llvm/llvm-project/pull/154135.diff

5 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9e1530a2d00f4..28598bcce2624 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -773,49 +773,6 @@ bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
return false;
}
-// (xor X, (and (xor X, C1), C2))
-// -> (qc.insbi X, (C1 >> ShAmt), Width, ShAmt)
-// where C2 is a shifted mask with width=Width and shift=ShAmt
-bool RISCVDAGToDAGISel::tryBitfieldInsertOpFromXor(SDNode *Node) {
-
- if (!Subtarget->hasVendorXqcibm())
- return false;
-
- using namespace SDPatternMatch;
-
- SDValue X;
- APInt CImm, CMask;
- if (!sd_match(
- Node,
- m_Xor(m_Value(X),
- m_OneUse(m_And(m_OneUse(m_Xor(m_Deferred(X), m_ConstInt(CImm))),
- m_ConstInt(CMask))))))
- return false;
-
- unsigned Width, ShAmt;
- if (!CMask.isShiftedMask(ShAmt, Width))
- return false;
-
- int64_t Imm = CImm.getSExtValue();
- Imm >>= ShAmt;
-
- SDLoc DL(Node);
- SDValue ImmNode;
- auto Opc = RISCV::QC_INSB;
-
- if (isInt<5>(Imm)) {
- Opc = RISCV::QC_INSBI;
- ImmNode = CurDAG->getSignedTargetConstant(Imm, DL, MVT::i32);
- } else {
- ImmNode = selectImm(CurDAG, DL, MVT::i32, Imm, *Subtarget);
- }
- SDValue Ops[] = {X, ImmNode, CurDAG->getTargetConstant(Width, DL, MVT::i32),
- CurDAG->getTargetConstant(ShAmt, DL, MVT::i32)};
- ReplaceNode(Node, CurDAG->getMachineNode(Opc, DL, MVT::i32, Ops));
-
- return true;
-}
-
bool RISCVDAGToDAGISel::tryUnsignedBitfieldExtract(SDNode *Node,
const SDLoc &DL, MVT VT,
SDValue X, unsigned Msb,
@@ -1393,9 +1350,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (tryShrinkShlLogicImm(Node))
return;
- if (tryBitfieldInsertOpFromXor(Node))
- return;
-
break;
case ISD::AND: {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 9d4cd0e6e3393..ee3a86e25add0 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -75,7 +75,6 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
bool trySignedBitfieldExtract(SDNode *Node);
bool trySignedBitfieldInsertInSign(SDNode *Node);
bool trySignedBitfieldInsertInMask(SDNode *Node);
- bool tryBitfieldInsertOpFromXor(SDNode *Node);
bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT,
SDValue X, unsigned Msb, unsigned Lsb);
bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ce03818b49502..9889a0039b648 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16059,6 +16059,45 @@ static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1,
return DAG.getNode(ISD::XOR, DL, VT, NewOr, TrueV.getOperand(1));
}
+// (xor X, (and (xor X, Y), C2))
+// -> (qc_insb X, (sra Y, ShAmt), Width, ShAmt)
+// where C2 is a shifted mask with width = Width and shift = ShAmt
+// qc_insb might become qc.insb or qc.insbi depending on the operands.
+static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ if (!Subtarget.hasVendorXqcibm())
+ return SDValue();
+
+ using namespace SDPatternMatch;
+
+ SDValue Base, Inserted;
+ APInt CMask;
+ if (!sd_match(N, m_Xor(m_Value(Base),
+ m_OneUse(m_And(m_OneUse(m_Xor(m_Deferred(Base),
+ m_Value(Inserted))),
+ m_ConstInt(CMask))))))
+ return SDValue();
+
+ if (N->getValueType(0) != MVT::i32 || Base.getValueType() != MVT::i32 ||
+ Inserted.getValueType() != MVT::i32)
+ return SDValue();
+
+ unsigned Width, ShAmt;
+ if (!CMask.isShiftedMask(ShAmt, Width))
+ return SDValue();
+
+ SDLoc DL(N);
+
+ // `Inserted` needs to be right - shifted before it is put into the
+ // instruction.
+ Inserted = DAG.getNode(ISD::SRA, DL, MVT::i32, Inserted,
+ DAG.getShiftAmountConstant(ShAmt, MVT::i32, DL));
+
+ SDValue Ops[] = {Base, Inserted, DAG.getConstant(Width, DL, MVT::i32),
+ DAG.getConstant(ShAmt, DL, MVT::i32)};
+ return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
+}
+
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const RISCVSubtarget &Subtarget) {
SelectionDAG &DAG = DCI.DAG;
@@ -16131,6 +16170,9 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue V = combineXorToBitfieldInsert(N, DAG, Subtarget))
+ return V;
+
if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
return V;
if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index 2c64b0c220fba..dc58f1e2c0d61 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -22,6 +22,13 @@ def SDT_SetMultiple : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
def qc_setwmi : RVSDNode<"QC_SETWMI", SDT_SetMultiple,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def qc_insb : RVSDNode<"QC_INSB", SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisVT<0, i32>,
+ SDTCisInt<3>,
+ SDTCisInt<4>]>,
+ []>;
+
def uimm5nonzero : RISCVOp<XLenVT>,
ImmLeaf<XLenVT, [{return (Imm != 0) && isUInt<5>(Imm);}]> {
let ParserMatchClass = UImmAsmOperand<5, "NonZero">;
@@ -1508,6 +1515,11 @@ def : Pat<(i32 (and GPRNoX0:$rs, 1023)), (QC_EXTU GPRNoX0:$rs, 10, 0)>;
def : Pat<(i32 (and GPRNoX0:$rs, 2047)), (QC_EXTU GPRNoX0:$rs, 11, 0)>;
def : Pat<(i32 (bitreverse GPRNoX0:$rs1)), (QC_BREV32 GPRNoX0:$rs1)>;
+
+def : Pat<(qc_insb GPRNoX0:$rd, simm5:$imm5, uimm5_plus1:$width, uimm5:$shamt),
+ (QC_INSBI GPRNoX0:$rd, simm5:$imm5, uimm5_plus1:$width, uimm5:$shamt)>;
+def : Pat<(qc_insb GPRNoX0:$rd, GPR:$rs1, uimm5_plus1:$width, uimm5:$shamt),
+ (QC_INSB GPRNoX0:$rd, GPR:$rs1, uimm5_plus1:$width, uimm5:$shamt)>;
} // Predicates = [HasVendorXqcibm, IsRV32]
// If Zbb is enabled sext.b/h is preferred since they are compressible
diff --git a/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll b/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll
index e4a545169d210..9ea4bce968af2 100644
--- a/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll
+++ b/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll
@@ -260,3 +260,271 @@ define i64 @insbi_i64_large_mask(i64 %in1) nounwind {
%xor2 = xor i64 %and1, %in1
ret i64 %xor2
}
+
+define i32 @insb(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a1, a1, a0
+; RV32I-NEXT: andi a1, a1, -2
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT: qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %shl1, %in1
+ %and1 = and i32 -2, %xor1
+ %xor2 = xor i32 %in1, %and1
+ ret i32 %xor2
+}
+
+define i32 @insb_and_mul(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_and_mul:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a1, a1, a0
+; RV32I-NEXT: andi a1, a1, -2
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_and_mul:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: slli a1, a1, 1
+; RV32XQCIBM-NEXT: xor a1, a1, a0
+; RV32XQCIBM-NEXT: andi a1, a1, -2
+; RV32XQCIBM-NEXT: xor a0, a0, a1
+; RV32XQCIBM-NEXT: add a0, a0, a1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %shl1, %in1
+ %and1 = and i32 -2, %xor1
+ %xor2 = xor i32 %in1, %and1
+ %add1 = add i32 %xor2, %and1
+ ret i32 %add1
+}
+
+define i32 @insb_xor_mul(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_xor_mul:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a1, a1, a0
+; RV32I-NEXT: andi a2, a1, -2
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_xor_mul:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: slli a1, a1, 1
+; RV32XQCIBM-NEXT: xor a1, a1, a0
+; RV32XQCIBM-NEXT: andi a2, a1, -2
+; RV32XQCIBM-NEXT: xor a0, a0, a2
+; RV32XQCIBM-NEXT: add a0, a0, a1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %shl1, %in1
+ %and1 = and i32 -2, %xor1
+ %xor2 = xor i32 %in1, %and1
+ %add1 = add i32 %xor2, %xor1
+ ret i32 %add1
+}
+
+define i32 @insb_shl_mul(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_shl_mul:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a2, a1, a0
+; RV32I-NEXT: andi a2, a2, -2
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_shl_mul:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: slli a1, a1, 1
+; RV32XQCIBM-NEXT: srai a2, a1, 1
+; RV32XQCIBM-NEXT: qc.insb a0, a2, 31, 1
+; RV32XQCIBM-NEXT: add a0, a0, a1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %shl1, %in1
+ %and1 = and i32 -2, %xor1
+ %xor2 = xor i32 %in1, %and1
+ %add1 = add i32 %xor2, %shl1
+ ret i32 %add1
+}
+
+define i32 @insb_comm(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_comm:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a1, a0, a1
+; RV32I-NEXT: andi a1, a1, -2
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_comm:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT: qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %in1, %shl1
+ %and1 = and i32 -2, %xor1
+ %xor2 = xor i32 %in1, %and1
+ ret i32 %xor2
+}
+
+define i32 @insb_comm1(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_comm1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a1, a0, a1
+; RV32I-NEXT: andi a1, a1, -2
+; RV32I-NEXT: xor a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_comm1:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT: qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %in1, %shl1
+ %and1 = and i32 -2, %xor1
+ %xor2 = xor i32 %and1, %in1
+ ret i32 %xor2
+}
+
+define i32 @insb_comm2(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_comm2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: xor a1, a0, a1
+; RV32I-NEXT: andi a1, a1, -2
+; RV32I-NEXT: xor a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_comm2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT: qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 1
+ %xor1 = xor i32 %in1, %shl1
+ %and1 = and i32 %xor1, -2
+ %xor2 = xor i32 %and1, %in1
+ ret i32 %xor2
+}
+
+define i32 @insb_not_shifted_mask(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_not_shifted_mask:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 18
+; RV32I-NEXT: xor a1, a0, a1
+; RV32I-NEXT: lui a2, 320
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_not_shifted_mask:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: slli a1, a1, 18
+; RV32XQCIBM-NEXT: xor a1, a1, a0
+; RV32XQCIBM-NEXT: lui a2, 320
+; RV32XQCIBM-NEXT: and a1, a1, a2
+; RV32XQCIBM-NEXT: xor a0, a0, a1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 18
+ %xor1 = xor i32 %in1, %shl1
+ %and1 = and i32 1310720, %xor1
+ %xor2 = xor i32 %in1, %and1
+ ret i32 %xor2
+}
+
+define i32 @insb_shift_diffrom_mask(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_shift_diffrom_mask:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: xor a1, a0, a1
+; RV32I-NEXT: lui a2, 192
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_shift_diffrom_mask:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a1, a1, 14, 2
+; RV32XQCIBM-NEXT: qc.insb a0, a1, 2, 18
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i32 %in2, 16
+ %xor1 = xor i32 %in1, %shl1
+ %and1 = and i32 786432, %xor1
+ %xor2 = xor i32 %in1, %and1
+ ret i32 %xor2
+}
+
+define i64 @insb_i64(i64 %in1, i64 %in2) nounwind {
+; RV32I-LABEL: insb_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a1, a2, 31
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: slli a2, a2, 1
+; RV32I-NEXT: or a1, a3, a1
+; RV32I-NEXT: xor a2, a0, a2
+; RV32I-NEXT: andi a2, a2, -2
+; RV32I-NEXT: xor a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_i64:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: srli a1, a2, 31
+; RV32XQCIBM-NEXT: slli a3, a3, 1
+; RV32XQCIBM-NEXT: qc.ext a2, a2, 31, 0
+; RV32XQCIBM-NEXT: or a1, a1, a3
+; RV32XQCIBM-NEXT: qc.insb a0, a2, 31, 1
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i64 %in2, 1
+ %xor1 = xor i64 %in1, %shl1
+ %and1 = and i64 %xor1, -2
+ %xor2 = xor i64 %and1, %in1
+ ret i64 %xor2
+}
+
+define i64 @insb_i64_only(i64 %in1, i64 %in2) nounwind {
+; RV32I-LABEL: insb_i64_only:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a4, a2, 31
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: slli a2, a2, 1
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: lui a4, 524288
+; RV32I-NEXT: xor a2, a0, a2
+; RV32I-NEXT: xor a3, a1, a3
+; RV32I-NEXT: and a2, a2, a4
+; RV32I-NEXT: andi a3, a3, 7
+; RV32I-NEXT: xor a0, a2, a0
+; RV32I-NEXT: xor a1, a3, a1
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: insb_i64_only:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: srli a4, a2, 31
+; RV32XQCIBM-NEXT: slli a3, a3, 1
+; RV32XQCIBM-NEXT: qc.ext a2, a2, 1, 30
+; RV32XQCIBM-NEXT: or a3, a3, a4
+; RV32XQCIBM-NEXT: qc.insb a0, a2, 1, 31
+; RV32XQCIBM-NEXT: qc.insb a1, a3, 3, 0
+; RV32XQCIBM-NEXT: ret
+ %shl1 = shl i64 %in2, 1
+ %xor1 = xor i64 %in1, %shl1
+ %and1 = and i64 %xor1, 32212254720
+ %xor2 = xor i64 %and1, %in1
+ ret i64 %xor2
+}
+
                     m_ConstInt(CMask))))))
    return SDValue();

  if (N->getValueType(0) != MVT::i32 || Base.getValueType() != MVT::i32 ||
Since you aren't looking through extends or truncates, the N->getValueType() != MVT::i32 check should be enough.
This is my fault; I was ensuring that the combine was conservative, and wasn't sure how much these could change.
Do we want to work harder to delay this combine until after (type?) legalisation? I wasn't sure, because not many other combines do so, but I see you made some changes in that direction today.
Done. Removed the extra checks.
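A minimal sketch of the reduced guard (a fragment of the combine above, not the exact committed code):

  // Only the result type needs checking: no extends or truncates are
  // looked through, so Base and Inserted necessarily share N's type.
  if (N->getValueType(0) != MVT::i32)
    return SDValue();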
  ret i64 %xor2
}

define i32 @insb(i32 %in1, i32 %in2) nounwind {
Most of these tests seem to be affected by InstCombine. Are we testing the right patterns?
I changed the code to match the correct patterns after observing them by running InstCombine on the IR that we want to match. Hopefully it now tests the correct patterns. Also, all the test cases with an immediate were changed by InstCombine to a sequence of or and and, which I think is covered in #154023.
; RV32XQCIBM-NEXT: ret
  %shl1 = shl i32 %in2, 1
  %xor1 = xor i32 %shl1, %in1
  %and1 = and i32 -2, %xor1
Why is the and mask on the left-hand side?
Done
Failing tests are due to LLDB. I am happy with this, but as I co-authored it, I'm not going to +1 it.
|
|
  SDLoc DL(N);

  // `Inserted` needs to be right - shifted before it is put into the
Suggested change:
-  // `Inserted` needs to be right - shifted before it is put into the
+  // `Inserted` needs to be right shifted before it is put into the
Done
  // Lower (setugt X, 2047) as (setne (srl X, 11), 0).
  if (CCVal == ISD::SETUGT && Imm == 2047) {
    SDValue Shift = DAG.getNode(ISD::SRL, DL, OpVT, LHS,
                                DAG.getShiftAmountConstant(11, OpVT, DL));
    return DAG.getSetCC(DL, VT, Shift, DAG.getConstant(0, DL, OpVT),
                        ISD::SETNE);
Computing known bits is much more difficult than, e.g., looking at the value type or checking whether something is a shifted mask. Can you reorder this check to come after the other two checks, for efficiency?
Moved it.
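A sketch of the suggested ordering, as a fragment of the combine above (illustrative only):

  // Cheap structural checks first: value type, then the shifted-mask test.
  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  unsigned Width, ShAmt;
  if (!CMask.isShiftedMask(ShAmt, Width))
    return SDValue();

  // Only then the comparatively expensive known-bits query.
  KnownBits Known = DAG.computeKnownBits(Inserted);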
I am happy with this, but as I co-authored a version of it, I cannot approve it.
  if (!CMask.isShiftedMask(ShAmt, Width))
    return SDValue();

  KnownBits Known = DAG.computeKnownBits(Inserted);
You can use DAG.MaskedValueIsZero(Inserted, ~CMask)
Done
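A minimal sketch of the simplified check, assuming the known-bits query was verifying that Inserted has no set bits outside the field mask:

  // One query replaces computeKnownBits plus a manual comparison: every
  // bit of Inserted outside CMask must be known zero.
  if (!DAG.MaskedValueIsZero(Inserted, ~CMask))
    return SDValue();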
topperc left a comment:

LGTM
Before this patch, the selection for QC_INSB and QC_INSBI happens entirely in C++, and does not support more than one non-constant input.

This patch seeks to rectify this shortcoming by moving the C++ into a target-specific DAGCombine and adding a RISCVISD::QC_INSB node. One advantage is that this simplifies the code for handling QC_INSBI, as the C++ no longer needs to choose between the two instructions based on the inserted value (this is still done, but via ISel patterns).

Another advantage of the DAGCombine is that it can also shift the inserted value for the QC_INSB, which our patterns need (and which was previously done to the constant), and this shift can be CSE'd/optimised with any prior shifts, if they exist. This allows the inserted value to be variable rather than a constant.
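For reference, a self-contained sketch of the shifted-mask decomposition the combine relies on. It mirrors APInt::isShiftedMask semantics for 32-bit values using GCC/Clang builtins; this is an illustration, not LLVM's implementation:

  #include <cstdint>
  #include <cstdio>

  // Returns true iff Mask is a single contiguous run of ones, reporting its
  // offset (ShAmt) and length (Width): exactly the two operands that
  // qc.insb/qc.insbi encode.
  static bool isShiftedMask32(uint32_t Mask, unsigned &ShAmt, unsigned &Width) {
    if (Mask == 0)
      return false;
    ShAmt = __builtin_ctz(Mask);        // trailing zeros = field offset
    uint32_t Shifted = Mask >> ShAmt;
    if (Shifted == UINT32_MAX) {        // run of ones extends to bit 31
      Width = 32 - ShAmt;
      return true;
    }
    Width = __builtin_ctz(~Shifted);    // length of the run of ones
    return (Shifted >> Width) == 0;     // reject a second group of ones
  }

  int main() {
    unsigned ShAmt, Width;
    // 0xFFFFFFFE is the mask (-2) from the insb test: Width=31, ShAmt=1,
    // matching the emitted "qc.insb a0, a1, 31, 1".
    if (isShiftedMask32(0xFFFFFFFEu, ShAmt, Width))
      printf("Width=%u ShAmt=%u\n", Width, ShAmt);
    return 0;
  }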