From 53ac808988d9e03b6a91580309cfd6687afab9ea Mon Sep 17 00:00:00 2001
From: Sudharsan Veeravalli
Date: Fri, 7 Feb 2025 12:19:22 +0530
Subject: [PATCH] [RISCV] Fix typos discovered by codespell NFC

Found using https://github.com/codespell-project/codespell

codespell RISCV --ignore-words-list=FPR,fpr,VAs,ORE,WorstCase,hart,sie,MIs,FLE,fle,CarryIn,vor,OLT,VILL,vill,bu,pass-thru --write-changes
---
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp |  4 +-
 .../RISCV/GISel/RISCVInstructionSelector.cpp  |  4 +-
 .../Target/RISCV/MCTargetDesc/RISCVMatInt.cpp |  2 +-
 .../MCTargetDesc/RISCVTargetStreamer.cpp      |  2 +-
 llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp     |  2 +-
 llvm/lib/Target/RISCV/RISCVFeatures.td        |  4 +-
 llvm/lib/Target/RISCV/RISCVFrameLowering.cpp  |  2 +-
 .../RISCV/RISCVGatherScatterLowering.cpp      |  2 +-
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   |  2 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 38 +++++++++----------
 .../RISCV/RISCVIndirectBranchTracking.cpp     |  2 +-
 llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp  |  2 +-
 .../lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 10 ++---
 llvm/lib/Target/RISCV/RISCVInstrFormats.td    |  2 +-
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp      |  2 +-
 .../Target/RISCV/RISCVMakeCompressible.cpp    |  8 ++--
 .../lib/Target/RISCV/RISCVMergeBaseOffset.cpp |  4 +-
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp   |  2 +-
 llvm/lib/Target/RISCV/RISCVSchedSiFive7.td    |  4 +-
 llvm/lib/Target/RISCV/RISCVScheduleV.td       |  2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |  2 +-
 .../Target/RISCV/RISCVTargetTransformInfo.cpp |  6 +--
 .../RISCV/RISCVVectorMaskDAGMutation.cpp      |  2 +-
 23 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index c51c4201ebd18..d050194142a47 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -130,7 +130,7 @@ class RISCVAsmParser : public MCTargetAsmParser {
   void emitToStreamer(MCStreamer &S, const MCInst &Inst);
 
   // Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that
-  // synthesize the desired immedate value into the destination register.
+  // synthesize the desired immediate value into the destination register.
   void emitLoadImm(MCRegister DestReg, int64_t Value, MCStreamer &Out);
 
   // Helper to emit a combination of AUIPC and SecondOpcode. Used to implement
@@ -2626,7 +2626,7 @@ ParseStatus RISCVAsmParser::parseZeroOffsetMemOp(OperandVector &Operands) {
   std::unique_ptr OptionalImmOp;
 
   if (getLexer().isNot(AsmToken::LParen)) {
-    // Parse an Integer token. We do not accept arbritrary constant expressions
+    // Parse an Integer token. We do not accept arbitrary constant expressions
     // in the offset field (because they may include parens, which complicates
     // parsing a lot).
     int64_t ImmVal;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index d5d422226281b..62fbe55dffba1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -621,7 +621,7 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
     return;
   }
 
-  // We found an ICmp, do some canonicalizations.
+  // We found an ICmp, do some canonicalization.
 
   // Adjust comparisons to use comparison with 0 if possible.
   if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
@@ -735,7 +735,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }
   case TargetOpcode::G_FCONSTANT: {
-    // TODO: Use constant pool for complext constants.
+    // TODO: Use constant pool for complex constants.
     // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
     Register DstReg = MI.getOperand(0).getReg();
     const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 26725cf7decbe..06ae8e1296e51 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -175,7 +175,7 @@ static unsigned extractRotateInfo(int64_t Val) {
 
 static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                         RISCVMatInt::InstSeq &Res) {
-  assert(Val > 0 && "Expected postive val");
+  assert(Val > 0 && "Expected positive val");
 
   unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
   uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 99f57f47835ab..72b3e56c8a72f 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -21,7 +21,7 @@
 
 using namespace llvm;
 
-// This option controls wether or not we emit ELF attributes for ABI features,
+// This option controls whether or not we emit ELF attributes for ABI features,
 // like RISC-V atomics or X3 usage.
 static cl::opt RiscvAbiAttr(
     "riscv-abi-attributes",
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index b1990409754b0..7dcf2ba2ac405 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -1089,7 +1089,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
   bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
   for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
     const MachineOperand &MO = MI->getOperand(OpNo);
-    // Skip vl ouput. It should be the second output.
+    // Skip vl output. It should be the second output.
     if (hasVLOutput && OpNo == 1)
       continue;
 
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index f050977c55e19..51aa8d7d307e4 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1020,7 +1020,7 @@ def HasStdExtSmctrOrSsctr : Predicate<"Subtarget->hasStdExtSmctrOrSsctr()">,
 // Vendor extensions
 //===----------------------------------------------------------------------===//
 
-// Ventana Extenions
+// Ventana Extensions
 
 def FeatureVendorXVentanaCondOps
     : RISCVExtension<1, 0, "Ventana Conditional Ops">;
@@ -1337,7 +1337,7 @@ def HasVendorXqcilo
 // LLVM specific features and extensions
 //===----------------------------------------------------------------------===//
 
-// Feature32Bit exists to mark CPUs that support RV32 to distinquish them from
+// Feature32Bit exists to mark CPUs that support RV32 to distinguish them from
 // tuning CPU names.
 def Feature32Bit
     : SubtargetFeature<"32bit", "IsRV32", "true", "Implements RV32">;
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 4beaa1e6b9e15..6abf45591d78e 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1182,7 +1182,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
 
   if (getLibCallID(MF, CSI) != -1) {
     // tail __riscv_restore_[0-12] instruction is considered as a terminator,
-    // therefor it is unnecessary to place any CFI instructions after it. Just
+    // therefore it is unnecessary to place any CFI instructions after it. Just
     // deallocate stack if needed and return.
     if (StackSize != 0)
       deallocateStack(MF, MBB, MBBI, DL, StackSize,
diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
index 39c0af7985971..82c0d8d4738a4 100644
--- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -131,7 +131,7 @@ static std::pair matchStridedStart(Value *Start,
   }
 
   // Not a constant, maybe it's a strided constant with a splat added or
-  // multipled.
+  // multiplied.
   auto *BO = dyn_cast(Start);
   if (!BO || (BO->getOpcode() != Instruction::Add &&
               BO->getOpcode() != Instruction::Or &&
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 82fb8fb8ccc69..ec2e8f1d50264 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3499,7 +3499,7 @@ bool RISCVDAGToDAGISel::selectSimm5Shl2(SDValue N, SDValue &Simm5,
 }
 
 // Select VL as a 5 bit immediate or a value that will become a register. This
-// allows us to choose betwen VSETIVLI or VSETVLI later.
+// allows us to choose between VSETIVLI or VSETVLI later.
 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
   auto *C = dyn_cast(N);
   if (C && isUInt<5>(C->getZExtValue())) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d91ba33c23596..13ce566f8def6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   if (isInt<32>(Val))
     return true;
 
-  // A constant pool entry may be more aligned thant he load we're trying to
+  // A constant pool entry may be more aligned than the load we're trying to
   // replace. If we don't support unaligned scalar mem, prefer the constant
   // pool.
   // TODO: Can the caller pass down the alignment?
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
 
   if (!DstVT.isVector()) {
-    // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
+    // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
     // the result.
     if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
         Src.getValueType() == MVT::bf16) {
@@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
 
 // Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
 // STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
-// qNan and coverting the new source to integer and back to FP.
+// qNan and converting the new source to integer and back to FP.
 static SDValue
 lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                                             const RISCVSubtarget &Subtarget) {
@@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
   // Freeze the source since we are increasing the number of uses.
   Src = DAG.getFreeze(Src);
 
-  // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
+  // Convert sNan to qNan by executing x + x for all unordered element x in Src.
   MVT MaskVT = Mask.getSimpleValueType();
   SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
                                 DAG.getVTList(MaskVT, MVT::Other),
@@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
     unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
     // If we have to use more than one INSERT_VECTOR_ELT then this
-    // optimization is likely to increase code size; avoid peforming it in
+    // optimization is likely to increase code size; avoid performing it in
     // such a case. We can use a load from a constant pool in this case.
     if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
       return SDValue();
@@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef Mask) {
     int MaskSrc = M < Size ? 0 : 1;
 
     // Compute which of the two target values this index should be assigned to.
-    // This reflects whether the high elements are remaining or the low elemnts
+    // This reflects whether the high elements are remaining or the low elements
     // are remaining.
     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
 
@@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
     SDValue RHS = CondV.getOperand(1);
     ISD::CondCode CCVal = cast(CondV.getOperand(2))->get();
 
-    // Special case for a select of 2 constants that have a diffence of 1.
+    // Special case for a select of 2 constants that have a difference of 1.
     // Normally this is done by DAGCombine, but if the select is introduced by
     // type legalization or op legalization, we miss it. Restricting to SETLT
     // case for now because that is what signed saturating add/sub need.
@@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
 // We need to convert from a scalable VF to a vsetvli with VLMax equal to
 // (vscale * VF). The vscale and VF are independent of element width. We use
 // SEW=8 for the vsetvli because it is the only element width that supports all
-// fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
+// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
 // (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
 // InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
 // SEW and LMUL are better for the surrounding vector instructions.
@@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     if (IsStrict) {
       SDValue Chain = N->getOperand(0);
-      // In absense of Zfh, promote f16 to f32, then convert.
+      // In absence of Zfh, promote f16 to f32, then convert.
       if (Op0.getValueType() == MVT::f16 &&
           !Subtarget.hasStdExtZfhOrZhinx()) {
         Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
@@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(Res.getValue(1));
       return;
     }
-    // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
+    // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then
     // convert.
     if ((Op0.getValueType() == MVT::f16 &&
          !Subtarget.hasStdExtZfhOrZhinx()) ||
@@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     if (!isTypeLegal(Op0VT))
       return;
 
-    // In absense of Zfh, promote f16 to f32, then convert.
+    // In absence of Zfh, promote f16 to f32, then convert.
     if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
       Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
 
@@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
 static unsigned getVecReduceOpcode(unsigned Opc) {
   switch (Opc) {
   default:
-    llvm_unreachable("Unhandled binary to transfrom reduction");
+    llvm_unreachable("Unhandled binary to transform reduction");
   case ISD::ADD:
     return ISD::VECREDUCE_ADD;
   case ISD::UMAX:
@@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
   auto BinOpToRVVReduce = [](unsigned Opc) {
     switch (Opc) {
     default:
-      llvm_unreachable("Unhandled binary to transfrom reduction");
+      llvm_unreachable("Unhandled binary to transform reduction");
     case ISD::ADD:
       return RISCVISD::VECREDUCE_ADD_VL;
     case ISD::UMAX:
@@ -15577,7 +15577,7 @@ struct NodeExtensionHelper {
 
   bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT,
                            const RISCVSubtarget &Subtarget) {
-    // Any f16 extension will neeed zvfh
+    // Any f16 extension will need zvfh
     if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16())
       return false;
     // The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with
@@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N,
     if (Base1 != Base2)
       continue;
 
-    // Check if the offsets match the XTHeadMemPair encoding contraints.
+    // Check if the offsets match the XTHeadMemPair encoding constraints.
     bool Valid = false;
     if (MemVT == MVT::i32) {
       // Check for adjacent i32 values and a 2-bit index.
@@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
 }
 
 // Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
-// the result is used as the conditon of a br_cc or select_cc we can invert,
+// the result is used as the condition of a br_cc or select_cc we can invert,
 // inverting the setcc is free, and Z is 0/1. Caller will invert the
 // br_cc/select_cc.
 static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
@@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
   return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
 }
 
-// Perform common combines for BR_CC and SELECT_CC condtions.
+// Perform common combines for BR_CC and SELECT_CC conditions.
 static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
                        SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
   ISD::CondCode CCVal = cast(CC)->get();
@@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       const int64_t Addend = SimpleVID->Addend;
 
       // Note: We don't need to check alignment here since (by assumption
-      // from the existance of the gather), our offsets must be sufficiently
+      // from the existence of the gather), our offsets must be sufficiently
       // aligned.
 
       const EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   MVT XLenVT = Subtarget.getXLenVT();
   unsigned XLenInBytes = Subtarget.getXLen() / 8;
-  // Used with vargs to acumulate store chains.
+  // Used with vargs to accumulate store chains.
   std::vector OutChains;
 
   // Assign locations to all of the incoming arguments.
diff --git a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
index c006fba4af4bc..4660a975b20ae 100644
--- a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
+++ b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // The pass adds LPAD (AUIPC with rs1 = X0) machine instructions at the
-// beginning of each basic block or function that is referenced by an indrect
+// beginning of each basic block or function that is referenced by an indirect
 // jump/call instruction.
 //
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index b56a39d8316d1..4a74906ed3cc3 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1069,7 +1069,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
     const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
     if (VLOp.isImm()) {
       int64_t Imm = VLOp.getImm();
-      // Conver the VLMax sentintel to X0 register.
+      // Convert the VLMax sentintel to X0 register.
       if (Imm == RISCV::VLMaxSentinel) {
         // If we know the exact VLEN, see if we can use the constant encoding
         // for the VLMAX instead. This reduces register pressure slightly.
diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index fe593a3cabad7..7df04fc225b0b 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -170,10 +170,10 @@ struct BlockData {
   // Indicates if the block uses VXRM. Uninitialized means no use.
   VXRMInfo VXRMUse;
 
-  // Indicates the VXRM output from the block. Unitialized means transparent.
+  // Indicates the VXRM output from the block. Uninitialized means transparent.
   VXRMInfo VXRMOut;
 
-  // Keeps track of the available VXRM value at the start of the basic bloc.
+  // Keeps track of the available VXRM value at the start of the basic block.
   VXRMInfo AvailableIn;
 
   // Keeps track of the available VXRM value at the end of the basic block.
@@ -384,8 +384,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
           PInfo.AvailableOut.getVXRMImm() ==
               BBInfo.AnticipatedIn.getVXRMImm())
         continue;
-      // If the predecessor anticipates this value for all its succesors,
-      // then a write to VXRM would have already occured before this block is
+      // If the predecessor anticipates this value for all its successors,
+      // then a write to VXRM would have already occurred before this block is
       // executed.
       if (PInfo.AnticipatedOut.isStatic() &&
           PInfo.AnticipatedOut.getVXRMImm() ==
@@ -429,7 +429,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
   // If all our successors anticipate a value, do the insert.
   // NOTE: It's possible that not all predecessors of our successor provide the
   // correct value. This can occur on critical edges. If we don't split the
-  // critical edge we'll also have a write vxrm in the succesor that is
+  // critical edge we'll also have a write vxrm in the successor that is
   // redundant with this one.
   if (PendingInsert ||
       (BBInfo.AnticipatedOut.isStatic() &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 013c26c72bfd5..cea28bdce284c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -232,7 +232,7 @@ class RVInstCommon
   // 0 -> not a vector pseudo
   // 1 -> default value for vector pseudos. not widening or narrowing.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 12a7af0750813..773319ba908c8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1516,7 +1516,7 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
   SeenMIs.erase(DefMI);
 
   // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
-  // DefMI would be invalid when tranferred inside the loop. Checking for a
+  // DefMI would be invalid when transferred inside the loop. Checking for a
   // loop is expensive, but at least remove kill flags if they are in different
   // BBs.
   if (DefMI->getParent() != MI.getParent())
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index df5501e37f831..5453753fa4579 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -17,7 +17,7 @@
 // For case 1, if a compressed register is available, then the uncompressed
 // register is copied to the compressed register and its uses are replaced.
 //
-// For example, storing zero uses the uncompressible zero register:
+// For example, storing zero uses the incompressible zero register:
 //   sw zero, 0(a0)   # if zero
 //   sw zero, 8(a0)   # if zero
 //   sw zero, 4(a0)   # if zero
@@ -275,7 +275,7 @@ static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) {
     // rather than used.
     //
     // For stores, we can change SrcDest (and Base if SrcDest == Base) but
-    // cannot resolve an uncompressible offset in this case.
+    // cannot resolve an incompressible offset in this case.
     if (isCompressibleStore(MI)) {
       if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
           !NewBaseAdjust)
@@ -313,7 +313,7 @@ static Register analyzeCompressibleUses(MachineInstr &FirstMI,
     // If RegImm.Reg is modified by this instruction, then we cannot optimize
     // past this instruction. If the register is already compressed, then it may
     // possible to optimize a large offset in the current instruction - this
-    // will have been detected by the preceeding call to
+    // will have been detected by the preceding call to
    // getRegImmPairPreventingCompression.
     if (MI.modifiesRegister(RegImm.Reg, TRI))
       break;
@@ -409,7 +409,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
     LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
     for (MachineInstr &MI : MBB) {
       // Determine if this instruction would otherwise be compressed if not for
-      // an uncompressible register or offset.
+      // an incompressible register or offset.
       RegImmPair RegImm = getRegImmPairPreventingCompression(MI);
       if (!RegImm.Reg && RegImm.Imm == 0)
         continue;
diff --git a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
index a324deb4e48f5..bbbb1e1595982 100644
--- a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -434,8 +434,8 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
 
       // Memory constraints have two operands.
       if (NumOps != 2 || !Flags.isMemKind()) {
-        // If the register is used by something other than a memory contraint,
-        // we should not fold.
+        // If the register is used by something other than a memory
+        // constraint, we should not fold.
         for (unsigned J = 0; J < NumOps; ++J) {
           const MachineOperand &MO = UseMI.getOperand(I + 1 + J);
           if (MO.isReg() && MO.getReg() == DestReg)
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 7a99bfd1b2512..12d54313a96ab 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -286,7 +286,7 @@ void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
   // instruction. This saves 1 instruction over the full lui/addi+add fallback
   // path. We avoid anything which can be done with a single lui as it might
   // be compressible. Note that the sh1add case is fully covered by the 2x addi
-  // case just above and is thus ommitted.
+  // case just above and is thus omitted.
   if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
     unsigned Opc = 0;
     if (isShiftedInt<12, 3>(Val)) {
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 9f7cd411a4943..f4d2073d3b52d 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -159,7 +159,7 @@ class SiFive7GetReductionCycles {
   );
 }
 
-/// Cycles for ordered reductions take approximatley 6*VL cycles
+/// Cycles for ordered reductions take approximately 6*VL cycles
 class SiFive7GetOrderedReductionCycles {
   defvar VLEN = 512;
   // (VLEN * LMUL) / SEW
@@ -224,7 +224,7 @@ def SiFive7VS : ProcResource<1>; // Store sequencer
 // vector unit is ready to dequeue them. The unit dequeues up to one instruction
 // per cycle, in order, as soon as the sequencer for that type of instruction is
 // available. This resource is meant to be used for 1 cycle by all vector
-// instructions, to model that only one vector instruction may be dequed at a
+// instructions, to model that only one vector instruction may be dequeued at a
 // time. The actual dequeueing into the sequencer is modeled by the VA, VL, and
 // VS sequencer resources below. Each of them will only accept a single
 // instruction at a time and remain busy for the number of cycles associated
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 6b9f1dd321891..0204ab4c98286 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -70,7 +70,7 @@ multiclass LMULSEWWriteResMXSEW resources,
 // behavior is aliased to a Variant. The Variant has Latency predLad and
 // ReleaseAtCycles predCycles if the SchedPredicate Pred is true, otherwise has
 // Latency noPredLat and ReleaseAtCycles noPredCycles. The WorstCase SchedWrite
-// is created similiarly if IsWorstCase is true.
+// is created similarly if IsWorstCase is true.
 multiclass LMULWriteResMXVariant resources,
                                  int predLat, list predAcquireCycles,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 3e4949232298e..4a69bdeb76161 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -78,7 +78,7 @@ static cl::opt EnableRISCVCopyPropagation(
 
 static cl::opt EnableRISCVDeadRegisterElimination(
     "riscv-enable-dead-defs", cl::Hidden,
     cl::desc("Enable the pass that removes dead"
-             " definitons and replaces stores to"
+             " definitions and replaces stores to"
              " them with stores to x0"),
     cl::init(true));
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 812592365a41a..b3ddd07902a5c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -673,7 +673,7 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
       // We use 2 for the cost of the mask materialization as this is the true
      // cost for small masks and most shuffles are small. At worst, this cost
      // should be a very small constant for the constant pool load. As such,
-      // we may bias towards large selects slightly more than truely warranted.
+      // we may bias towards large selects slightly more than truly warranted.
       return LT.first *
              (1 + getRISCVInstructionCost({RISCV::VMV_S_X, RISCV::VMERGE_VVM},
                                           LT.second, CostKind));
@@ -2396,7 +2396,7 @@ InstructionCost RISCVTTIImpl::getPointersChainCost(
   // either GEP instructions, PHIs, bitcasts or constants. When we have same
   // base, we just calculate cost of each non-Base GEP as an ADD operation if
   // any their index is a non-const.
-  // If no known dependecies between the pointers cost is calculated as a sum
+  // If no known dependencies between the pointers cost is calculated as a sum
   // of costs of GEP instructions.
   for (auto [I, V] : enumerate(Ptrs)) {
     const auto *GEP = dyn_cast(V);
@@ -2440,7 +2440,7 @@ void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
   if (ST->enableDefaultUnroll())
     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
 
-  // Enable Upper bound unrolling universally, not dependant upon the conditions
+  // Enable Upper bound unrolling universally, not dependent upon the conditions
   // below.
   UP.UpperBound = true;
 
diff --git a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
index c48a97b12e43f..0bddbacc89e3e 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
@@ -12,7 +12,7 @@
 //
 // The reason why we need to do this:
 // 1. When tracking register pressure, we don't track physical registers.
-// 2. We have a RegisterClass for mask reigster (which is `VMV0`), but we don't
+// 2. We have a RegisterClass for mask register (which is `VMV0`), but we don't
 //    use it in most RVV pseudos (only used in inline asm constraint and add/sub
 //    with carry instructions). Instead, we use physical register V0 directly
 //    and insert a `$v0 = COPY ...` before the use. And, there is a fundamental