@@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
20772077 if (isInt<32>(Val))
20782078 return true;
20792079
2080- // A constant pool entry may be more aligned thant he load we're trying to
2080+ // A constant pool entry may be more aligned than the load we're trying to
20812081 // replace. If we don't support unaligned scalar mem, prefer the constant
20822082 // pool.
20832083 // TODO: Can the caller pass down the alignment?
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
29212921 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
29222922
29232923 if (!DstVT.isVector()) {
2924- // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
2924+ // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
29252925 // the result.
29262926 if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
29272927 Src.getValueType() == MVT::bf16) {
@@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
31863186
31873187// Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
31883188// STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
3189- // qNan and coverting the new source to integer and back to FP.
3189+ // qNan and converting the new source to integer and back to FP.
31903190static SDValue
31913191lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
31923192 const RISCVSubtarget &Subtarget) {
@@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
32063206 // Freeze the source since we are increasing the number of uses.
32073207 Src = DAG.getFreeze(Src);
32083208
3209- // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
3209+ // Convert sNan to qNan by executing x + x for all unordered element x in Src.
32103210 MVT MaskVT = Mask.getSimpleValueType();
32113211 SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
32123212 DAG.getVTList(MaskVT, MVT::Other),
@@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
37243724 unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
37253725 NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
37263726 // If we have to use more than one INSERT_VECTOR_ELT then this
3727- // optimization is likely to increase code size; avoid peforming it in
3727+ // optimization is likely to increase code size; avoid performing it in
37283728 // such a case. We can use a load from a constant pool in this case.
37293729 if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
37303730 return SDValue();
@@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
46184618 int MaskSrc = M < Size ? 0 : 1;
46194619
46204620 // Compute which of the two target values this index should be assigned to.
4621- // This reflects whether the high elements are remaining or the low elemnts
4621+ // This reflects whether the high elements are remaining or the low elements
46224622 // are remaining.
46234623 int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
46244624
@@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
85678567 SDValue RHS = CondV.getOperand(1);
85688568 ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
85698569
8570- // Special case for a select of 2 constants that have a diffence of 1.
8570+ // Special case for a select of 2 constants that have a difference of 1.
85718571 // Normally this is done by DAGCombine, but if the select is introduced by
85728572 // type legalization or op legalization, we miss it. Restricting to SETLT
85738573 // case for now because that is what signed saturating add/sub need.
@@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
97179717// We need to convert from a scalable VF to a vsetvli with VLMax equal to
97189718// (vscale * VF). The vscale and VF are independent of element width. We use
97199719// SEW=8 for the vsetvli because it is the only element width that supports all
9720- // fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
9720+ // fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
97219721// (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
97229722// InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
97239723// SEW and LMUL are better for the surrounding vector instructions.
@@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1320313203 return;
1320413204 if (IsStrict) {
1320513205 SDValue Chain = N->getOperand(0);
13206- // In absense of Zfh, promote f16 to f32, then convert.
13206+ // In absence of Zfh, promote f16 to f32, then convert.
1320713207 if (Op0.getValueType() == MVT::f16 &&
1320813208 !Subtarget.hasStdExtZfhOrZhinx()) {
1320913209 Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
@@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1322013220 Results.push_back(Res.getValue(1));
1322113221 return;
1322213222 }
13223- // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
13223+ // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then
1322413224 // convert.
1322513225 if ((Op0.getValueType() == MVT::f16 &&
1322613226 !Subtarget.hasStdExtZfhOrZhinx()) ||
@@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1326313263 if (!isTypeLegal(Op0VT))
1326413264 return;
1326513265
13266- // In absense of Zfh, promote f16 to f32, then convert.
13266+ // In absence of Zfh, promote f16 to f32, then convert.
1326713267 if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
1326813268 Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
1326913269
@@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1389013890static unsigned getVecReduceOpcode(unsigned Opc) {
1389113891 switch (Opc) {
1389213892 default:
13893- llvm_unreachable("Unhandled binary to transfrom reduction");
13893+ llvm_unreachable("Unhandled binary to transform reduction");
1389413894 case ISD::ADD:
1389513895 return ISD::VECREDUCE_ADD;
1389613896 case ISD::UMAX:
@@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
1402014020 auto BinOpToRVVReduce = [](unsigned Opc) {
1402114021 switch (Opc) {
1402214022 default:
14023- llvm_unreachable("Unhandled binary to transfrom reduction");
14023+ llvm_unreachable("Unhandled binary to transform reduction");
1402414024 case ISD::ADD:
1402514025 return RISCVISD::VECREDUCE_ADD_VL;
1402614026 case ISD::UMAX:
@@ -15577,7 +15577,7 @@ struct NodeExtensionHelper {
1557715577
1557815578 bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT,
1557915579 const RISCVSubtarget &Subtarget) {
15580- // Any f16 extension will neeed zvfh
15580+ // Any f16 extension will need zvfh
1558115581 if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16())
1558215582 return false;
1558315583 // The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with
@@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N,
1632616326 if (Base1 != Base2)
1632716327 continue;
1632816328
16329- // Check if the offsets match the XTHeadMemPair encoding contraints .
16329+ // Check if the offsets match the XTHeadMemPair encoding constraints.
1633016330 bool Valid = false;
1633116331 if (MemVT == MVT::i32) {
1633216332 // Check for adjacent i32 values and a 2-bit index.
@@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
1695416954}
1695516955
1695616956// Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
16957- // the result is used as the conditon of a br_cc or select_cc we can invert,
16957+ // the result is used as the condition of a br_cc or select_cc we can invert,
1695816958// inverting the setcc is free, and Z is 0/1. Caller will invert the
1695916959// br_cc/select_cc.
1696016960static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
@@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
1701517015 return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
1701617016}
1701717017
17018- // Perform common combines for BR_CC and SELECT_CC condtions .
17018+ // Perform common combines for BR_CC and SELECT_CC conditions.
1701917019static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
1702017020 SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
1702117021 ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
@@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
1860318603 const int64_t Addend = SimpleVID->Addend;
1860418604
1860518605 // Note: We don't need to check alignment here since (by assumption
18606- // from the existance of the gather), our offsets must be sufficiently
18606+ // from the existence of the gather), our offsets must be sufficiently
1860718607 // aligned.
1860818608
1860918609 const EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
2063920639 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2064020640 MVT XLenVT = Subtarget.getXLenVT();
2064120641 unsigned XLenInBytes = Subtarget.getXLen() / 8;
20642- // Used with vargs to acumulate store chains.
20642+ // Used with vargs to accumulate store chains.
2064320643 std::vector<SDValue> OutChains;
2064420644
2064520645 // Assign locations to all of the incoming arguments.
0 commit comments