@@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   if (isInt<32>(Val))
     return true;
 
-  // A constant pool entry may be more aligned thant he load we're trying to
+  // A constant pool entry may be more aligned than the load we're trying to
   // replace. If we don't support unaligned scalar mem, prefer the constant
   // pool.
   // TODO: Can the caller pass down the alignment?
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
 
   if (!DstVT.isVector()) {
-    // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
+    // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
     // the result.
     if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
         Src.getValueType() == MVT::bf16) {
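A side note on what the fixed comment relies on (a minimal standalone sketch, not part of the patch; the helper name fpToSIntSat8 is ours): FP_TO_SINT_SAT clamps out-of-range inputs to the integer type's limits and maps NaN to zero, and f16/bf16-to-f32 promotion is exact, so saturating after the promotion gives the same result as saturating the original half-precision value. For an i8 destination:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Scalar model of FP_TO_SINT_SAT for an i8 result: NaN -> 0, values outside
// [INT8_MIN, INT8_MAX] clamp, in-range values truncate toward zero.
static int8_t fpToSIntSat8(float X) {
  if (std::isnan(X))
    return 0;
  if (X <= -128.0f)
    return INT8_MIN;
  if (X >= 127.0f)
    return INT8_MAX;
  return static_cast<int8_t>(X);
}

int main() {
  // Prints "-128 42 0".
  std::printf("%d %d %d\n", fpToSIntSat8(-1000.0f), fpToSIntSat8(42.9f),
              fpToSIntSat8(NAN));
}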
@@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
 
 // Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
 // STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
-// qNan and coverting the new source to integer and back to FP.
+// qNan and converting the new source to integer and back to FP.
 static SDValue
 lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                                             const RISCVSubtarget &Subtarget) {
@@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
   // Freeze the source since we are increasing the number of uses.
   Src = DAG.getFreeze(Src);
 
-  // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
+  // Convert sNan to qNan by executing x + x for all unordered element x in Src.
   MVT MaskVT = Mask.getSimpleValueType();
   SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
                                 DAG.getVTList(MaskVT, MVT::Other),
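For context on the comment corrected above (a standalone illustration, not part of the patch; needs C++20 for std::bit_cast): IEEE-754 requires arithmetic on a signaling NaN to raise the invalid exception and deliver a quiet NaN, i.e. the quiet bit (the mantissa MSB, bit 22 for f32) comes back set, so x + x quiets a NaN while leaving ordered values' classification untouched. The lowered code applies this only to the unordered lanes found by the STRICT_FSETCC_VL just below.

#include <bit>
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  float SNan = std::numeric_limits<float>::signaling_NaN();
  volatile float X = SNan; // keep the add out of compile-time folding
  float QNan = X + X;      // arithmetic on an sNaN yields a qNaN
  // Prints "0" then "1": the addition set the quiet bit.
  std::printf("%u\n", (std::bit_cast<std::uint32_t>(SNan) >> 22) & 1u);
  std::printf("%u\n", (std::bit_cast<std::uint32_t>(QNan) >> 22) & 1u);
}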
@@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
     unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
     // If we have to use more than one INSERT_VECTOR_ELT then this
-    // optimization is likely to increase code size; avoid peforming it in
+    // optimization is likely to increase code size; avoid performing it in
     // such a case. We can use a load from a constant pool in this case.
     if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
       return SDValue();
@@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
     int MaskSrc = M < Size ? 0 : 1;
 
     // Compute which of the two target values this index should be assigned to.
-    // This reflects whether the high elements are remaining or the low elemnts
+    // This reflects whether the high elements are remaining or the low elements
    // are remaining.
     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
 
@@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
     SDValue RHS = CondV.getOperand(1);
     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
 
-    // Special case for a select of 2 constants that have a diffence of 1.
+    // Special case for a select of 2 constants that have a difference of 1.
     // Normally this is done by DAGCombine, but if the select is introduced by
     // type legalization or op legalization, we miss it. Restricting to SETLT
     // case for now because that is what signed saturating add/sub need.
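The special case the corrected comment describes reduces to a small identity: because a scalar setcc produces 0 or 1, (x < y) ? C + 1 : C is just C + (x < y). A self-contained check (our own helper name, illustrative only):

#include <cassert>

// (x < y) ? C + 1 : C  ==  C + (x < y): the comparison contributes exactly
// 0 or 1, so a select of two adjacent constants collapses to an add.
static int selectAdjacentConstants(int X, int Y, int C) {
  return C + (X < Y); // branchless form the lowering can emit
}

int main() {
  for (int X = -2; X <= 2; ++X)
    for (int Y = -2; Y <= 2; ++Y)
      assert(selectAdjacentConstants(X, Y, 5) == ((X < Y) ? 6 : 5));
}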
@@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
 // We need to convert from a scalable VF to a vsetvli with VLMax equal to
 // (vscale * VF). The vscale and VF are independent of element width. We use
 // SEW=8 for the vsetvli because it is the only element width that supports all
-// fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
+// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
 // (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
 // InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
 // SEW and LMUL are better for the surrounding vector instructions.
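Worked through, the arithmetic behind the corrected comment (assuming RVVBitsPerBlock = 64, its value in LLVM's RISC-V backend): VLMax = (VLEN / SEW) * LMUL and vscale = VLEN / 64, so requiring VLMax == vscale * VF at SEW = 8 gives LMUL = VF / 8. A small sketch tabulating the choice:

#include <cstdio>

// With SEW = 8: VLMax = (VLEN / 8) * LMUL and vscale * VF = (VLEN / 64) * VF,
// so VLMax == vscale * VF forces LMUL = VF / 8.
int main() {
  for (unsigned VF = 1; VF <= 16; VF *= 2) {
    if (VF >= 8)
      std::printf("VF=%2u -> LMUL=m%u\n", VF, VF / 8);  // integral LMUL
    else
      std::printf("VF=%2u -> LMUL=mf%u\n", VF, 8 / VF); // fractional LMUL
  }
}

VF = 1 lands on mf8, which is only legal when LMUL >= SEW/ELEN, i.e. at SEW = 8 for ELEN = 64; that is why the comment insists on SEW = 8.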
@@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     if (IsStrict) {
       SDValue Chain = N->getOperand(0);
-      // In absense of Zfh, promote f16 to f32, then convert.
+      // In absence of Zfh, promote f16 to f32, then convert.
       if (Op0.getValueType() == MVT::f16 &&
           !Subtarget.hasStdExtZfhOrZhinx()) {
         Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
@@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(Res.getValue(1));
       return;
     }
-    // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
+    // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then
     // convert.
     if ((Op0.getValueType() == MVT::f16 &&
          !Subtarget.hasStdExtZfhOrZhinx()) ||
@@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     if (!isTypeLegal(Op0VT))
       return;
 
-    // In absense of Zfh, promote f16 to f32, then convert.
+    // In absence of Zfh, promote f16 to f32, then convert.
     if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
       Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
 
@@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
 static unsigned getVecReduceOpcode(unsigned Opc) {
   switch (Opc) {
   default:
-    llvm_unreachable("Unhandled binary to transfrom reduction");
+    llvm_unreachable("Unhandled binary to transform reduction");
   case ISD::ADD:
     return ISD::VECREDUCE_ADD;
   case ISD::UMAX:
@@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
   auto BinOpToRVVReduce = [](unsigned Opc) {
     switch (Opc) {
     default:
-      llvm_unreachable("Unhandled binary to transfrom reduction");
+      llvm_unreachable("Unhandled binary to transform reduction");
     case ISD::ADD:
       return RISCVISD::VECREDUCE_ADD_VL;
     case ISD::UMAX:
@@ -15577,7 +15577,7 @@ struct NodeExtensionHelper {
 
   bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT,
                            const RISCVSubtarget &Subtarget) {
-    // Any f16 extension will neeed zvfh
+    // Any f16 extension will need zvfh
     if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16())
       return false;
     // The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with
@@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N,
     if (Base1 != Base2)
       continue;
 
-    // Check if the offsets match the XTHeadMemPair encoding contraints.
+    // Check if the offsets match the XTHeadMemPair encoding constraints.
     bool Valid = false;
     if (MemVT == MVT::i32) {
       // Check for adjacent i32 values and a 2-bit index.
@@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
 }
 
 // Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
-// the result is used as the conditon of a br_cc or select_cc we can invert,
+// the result is used as the condition of a br_cc or select_cc we can invert,
 // inverting the setcc is free, and Z is 0/1. Caller will invert the
 // br_cc/select_cc.
 static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
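The inversion the corrected comment describes is De Morgan's law plus a free setcc inversion: since the caller will invert the final br_cc/select_cc anyway, !(setcc && (Z ^ 1)) can be emitted as (!setcc || Z), dropping the xor. An exhaustive check of the underlying 0/1 identity (illustrative, not LLVM code):

#include <cassert>

int main() {
  // !(A && (Z ^ 1)) == (!A || Z) for 0/1 values; symmetrically for ||/&&.
  for (int A = 0; A <= 1; ++A)
    for (int Z = 0; Z <= 1; ++Z)
      assert(!(A && (Z ^ 1)) == (!A || Z));
}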
@@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
   return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
 }
 
-// Perform common combines for BR_CC and SELECT_CC condtions.
+// Perform common combines for BR_CC and SELECT_CC conditions.
 static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
                        SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
   ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
@@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
         const int64_t Addend = SimpleVID->Addend;
 
         // Note: We don't need to check alignment here since (by assumption
-        // from the existance of the gather), our offsets must be sufficiently
+        // from the existence of the gather), our offsets must be sufficiently
         // aligned.
 
         const EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   MVT XLenVT = Subtarget.getXLenVT();
   unsigned XLenInBytes = Subtarget.getXLen() / 8;
-  // Used with vargs to acumulate store chains.
+  // Used with vargs to accumulate store chains.
   std::vector<SDValue> OutChains;
 
   // Assign locations to all of the incoming arguments.