Merged
4 changes: 2 additions & 2 deletions llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -130,7 +130,7 @@ class RISCVAsmParser : public MCTargetAsmParser {
void emitToStreamer(MCStreamer &S, const MCInst &Inst);

// Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that
- // synthesize the desired immedate value into the destination register.
+ // synthesize the desired immediate value into the destination register.
void emitLoadImm(MCRegister DestReg, int64_t Value, MCStreamer &Out);

// Helper to emit a combination of AUIPC and SecondOpcode. Used to implement
@@ -2626,7 +2626,7 @@ ParseStatus RISCVAsmParser::parseZeroOffsetMemOp(OperandVector &Operands) {
std::unique_ptr<RISCVOperand> OptionalImmOp;

if (getLexer().isNot(AsmToken::LParen)) {
- // Parse an Integer token. We do not accept arbritrary constant expressions
+ // Parse an Integer token. We do not accept arbitrary constant expressions
// in the offset field (because they may include parens, which complicates
// parsing a lot).
int64_t ImmVal;
4 changes: 2 additions & 2 deletions llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -621,7 +621,7 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
return;
}

- // We found an ICmp, do some canonicalizations.
+ // We found an ICmp, do some canonicalization.

// Adjust comparisons to use comparison with 0 if possible.
if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
@@ -735,7 +735,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return true;
}
case TargetOpcode::G_FCONSTANT: {
- // TODO: Use constant pool for complext constants.
+ // TODO: Use constant pool for complex constants.
// TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
Register DstReg = MI.getOperand(0).getReg();
const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -175,7 +175,7 @@ static unsigned extractRotateInfo(int64_t Val) {

static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
RISCVMatInt::InstSeq &Res) {
- assert(Val > 0 && "Expected postive val");
+ assert(Val > 0 && "Expected positive val");

unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -21,7 +21,7 @@

using namespace llvm;

- // This option controls wether or not we emit ELF attributes for ABI features,
+ // This option controls whether or not we emit ELF attributes for ABI features,
// like RISC-V atomics or X3 usage.
static cl::opt<bool> RiscvAbiAttr(
"riscv-abi-attributes",
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -1089,7 +1089,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
const MachineOperand &MO = MI->getOperand(OpNo);
- // Skip vl ouput. It should be the second output.
+ // Skip vl output. It should be the second output.
if (hasVLOutput && OpNo == 1)
continue;

4 changes: 2 additions & 2 deletions llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1020,7 +1020,7 @@ def HasStdExtSmctrOrSsctr : Predicate<"Subtarget->hasStdExtSmctrOrSsctr()">,
// Vendor extensions
//===----------------------------------------------------------------------===//

- // Ventana Extenions
+ // Ventana Extensions

def FeatureVendorXVentanaCondOps
: RISCVExtension<1, 0, "Ventana Conditional Ops">;
@@ -1337,7 +1337,7 @@ def HasVendorXqcilo
// LLVM specific features and extensions
//===----------------------------------------------------------------------===//

- // Feature32Bit exists to mark CPUs that support RV32 to distinquish them from
+ // Feature32Bit exists to mark CPUs that support RV32 to distinguish them from
// tuning CPU names.
def Feature32Bit
: SubtargetFeature<"32bit", "IsRV32", "true", "Implements RV32">;
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1182,7 +1182,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,

if (getLibCallID(MF, CSI) != -1) {
// tail __riscv_restore_[0-12] instruction is considered as a terminator,
- // therefor it is unnecessary to place any CFI instructions after it. Just
+ // therefore it is unnecessary to place any CFI instructions after it. Just
// deallocate stack if needed and return.
if (StackSize != 0)
deallocateStack(MF, MBB, MBBI, DL, StackSize,
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -131,7 +131,7 @@ static std::pair<Value *, Value *> matchStridedStart(Value *Start,
}

// Not a constant, maybe it's a strided constant with a splat added or
- // multipled.
+ // multiplied.
auto *BO = dyn_cast<BinaryOperator>(Start);
if (!BO || (BO->getOpcode() != Instruction::Add &&
BO->getOpcode() != Instruction::Or &&
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3499,7 +3499,7 @@ bool RISCVDAGToDAGISel::selectSimm5Shl2(SDValue N, SDValue &Simm5,
}

// Select VL as a 5 bit immediate or a value that will become a register. This
- // allows us to choose betwen VSETIVLI or VSETVLI later.
+ // allows us to choose between VSETIVLI or VSETVLI later.
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
38 changes: 19 additions & 19 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
if (isInt<32>(Val))
return true;

- // A constant pool entry may be more aligned thant he load we're trying to
+ // A constant pool entry may be more aligned than the load we're trying to
// replace. If we don't support unaligned scalar mem, prefer the constant
// pool.
// TODO: Can the caller pass down the alignment?
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;

if (!DstVT.isVector()) {
- // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
+ // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
// the result.
if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
Src.getValueType() == MVT::bf16) {
@@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,

// Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
// STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
- // qNan and coverting the new source to integer and back to FP.
+ // qNan and converting the new source to integer and back to FP.
static SDValue
lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
@@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
// Freeze the source since we are increasing the number of uses.
Src = DAG.getFreeze(Src);

- // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
+ // Convert sNan to qNan by executing x + x for all unordered element x in Src.
MVT MaskVT = Mask.getSimpleValueType();
SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
DAG.getVTList(MaskVT, MVT::Other),
@@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
// If we have to use more than one INSERT_VECTOR_ELT then this
- // optimization is likely to increase code size; avoid peforming it in
+ // optimization is likely to increase code size; avoid performing it in
// such a case. We can use a load from a constant pool in this case.
if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
return SDValue();
@@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
int MaskSrc = M < Size ? 0 : 1;

// Compute which of the two target values this index should be assigned to.
- // This reflects whether the high elements are remaining or the low elemnts
+ // This reflects whether the high elements are remaining or the low elements
// are remaining.
int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;

@@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue RHS = CondV.getOperand(1);
ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();

- // Special case for a select of 2 constants that have a diffence of 1.
+ // Special case for a select of 2 constants that have a difference of 1.
// Normally this is done by DAGCombine, but if the select is introduced by
// type legalization or op legalization, we miss it. Restricting to SETLT
// case for now because that is what signed saturating add/sub need.
@@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
// We need to convert from a scalable VF to a vsetvli with VLMax equal to
// (vscale * VF). The vscale and VF are independent of element width. We use
// SEW=8 for the vsetvli because it is the only element width that supports all
- // fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
+ // fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
// (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
// InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
// SEW and LMUL are better for the surrounding vector instructions.
@@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
return;
if (IsStrict) {
SDValue Chain = N->getOperand(0);
- // In absense of Zfh, promote f16 to f32, then convert.
+ // In absence of Zfh, promote f16 to f32, then convert.
if (Op0.getValueType() == MVT::f16 &&
!Subtarget.hasStdExtZfhOrZhinx()) {
Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
@@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(Res.getValue(1));
return;
}
- // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
+ // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then
// convert.
if ((Op0.getValueType() == MVT::f16 &&
!Subtarget.hasStdExtZfhOrZhinx()) ||
@@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
if (!isTypeLegal(Op0VT))
return;

- // In absense of Zfh, promote f16 to f32, then convert.
+ // In absence of Zfh, promote f16 to f32, then convert.
if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);

@@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
static unsigned getVecReduceOpcode(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("Unhandled binary to transfrom reduction");
llvm_unreachable("Unhandled binary to transform reduction");
case ISD::ADD:
return ISD::VECREDUCE_ADD;
case ISD::UMAX:
@@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
auto BinOpToRVVReduce = [](unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("Unhandled binary to transfrom reduction");
llvm_unreachable("Unhandled binary to transform reduction");
case ISD::ADD:
return RISCVISD::VECREDUCE_ADD_VL;
case ISD::UMAX:
@@ -15577,7 +15577,7 @@ struct NodeExtensionHelper {

bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT,
const RISCVSubtarget &Subtarget) {
- // Any f16 extension will neeed zvfh
+ // Any f16 extension will need zvfh
if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16())
return false;
// The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with
@@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N,
if (Base1 != Base2)
continue;

- // Check if the offsets match the XTHeadMemPair encoding contraints.
+ // Check if the offsets match the XTHeadMemPair encoding constraints.
bool Valid = false;
if (MemVT == MVT::i32) {
// Check for adjacent i32 values and a 2-bit index.
@@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
}

// Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
- // the result is used as the conditon of a br_cc or select_cc we can invert,
+ // the result is used as the condition of a br_cc or select_cc we can invert,
// inverting the setcc is free, and Z is 0/1. Caller will invert the
// br_cc/select_cc.
static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
@@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
}

- // Perform common combines for BR_CC and SELECT_CC condtions.
+ // Perform common combines for BR_CC and SELECT_CC conditions.
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
@@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
const int64_t Addend = SimpleVID->Addend;

// Note: We don't need to check alignment here since (by assumption
- // from the existance of the gather), our offsets must be sufficiently
+ // from the existence of the gather), our offsets must be sufficiently
// aligned.

const EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
EVT PtrVT = getPointerTy(DAG.getDataLayout());
MVT XLenVT = Subtarget.getXLenVT();
unsigned XLenInBytes = Subtarget.getXLen() / 8;
- // Used with vargs to acumulate store chains.
+ // Used with vargs to accumulate store chains.
std::vector<SDValue> OutChains;

// Assign locations to all of the incoming arguments.
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// The pass adds LPAD (AUIPC with rs1 = X0) machine instructions at the
- // beginning of each basic block or function that is referenced by an indrect
+ // beginning of each basic block or function that is referenced by an indirect
// jump/call instruction.
//
//===----------------------------------------------------------------------===//
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1069,7 +1069,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
if (VLOp.isImm()) {
int64_t Imm = VLOp.getImm();
- // Conver the VLMax sentintel to X0 register.
+ // Convert the VLMax sentintel to X0 register.
if (Imm == RISCV::VLMaxSentinel) {
// If we know the exact VLEN, see if we can use the constant encoding
// for the VLMAX instead. This reduces register pressure slightly.
10 changes: 5 additions & 5 deletions llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -170,10 +170,10 @@ struct BlockData {
// Indicates if the block uses VXRM. Uninitialized means no use.
VXRMInfo VXRMUse;

- // Indicates the VXRM output from the block. Unitialized means transparent.
+ // Indicates the VXRM output from the block. Uninitialized means transparent.
VXRMInfo VXRMOut;

- // Keeps track of the available VXRM value at the start of the basic bloc.
+ // Keeps track of the available VXRM value at the start of the basic block.
VXRMInfo AvailableIn;

// Keeps track of the available VXRM value at the end of the basic block.
@@ -384,8 +384,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
PInfo.AvailableOut.getVXRMImm() ==
BBInfo.AnticipatedIn.getVXRMImm())
continue;
- // If the predecessor anticipates this value for all its succesors,
- // then a write to VXRM would have already occured before this block is
+ // If the predecessor anticipates this value for all its successors,
+ // then a write to VXRM would have already occurred before this block is
// executed.
if (PInfo.AnticipatedOut.isStatic() &&
PInfo.AnticipatedOut.getVXRMImm() ==
@@ -429,7 +429,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
// If all our successors anticipate a value, do the insert.
// NOTE: It's possible that not all predecessors of our successor provide the
// correct value. This can occur on critical edges. If we don't split the
- // critical edge we'll also have a write vxrm in the succesor that is
+ // critical edge we'll also have a write vxrm in the successor that is
// redundant with this one.
if (PendingInsert ||
(BBInfo.AnticipatedOut.isStatic() &&
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -232,7 +232,7 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
bit UsesVXRM = 0;
let TSFlags{20} = UsesVXRM;

- // Indicates whther these instructions can partially overlap between source
+ // Indicates whether these instructions can partially overlap between source
// registers and destination registers according to the vector spec.
// 0 -> not a vector pseudo
// 1 -> default value for vector pseudos. not widening or narrowing.
2 changes: 1 addition & 1 deletion llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1516,7 +1516,7 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
SeenMIs.erase(DefMI);

// If MI is inside a loop, and DefMI is outside the loop, then kill flags on
- // DefMI would be invalid when tranferred inside the loop. Checking for a
+ // DefMI would be invalid when transferred inside the loop. Checking for a
// loop is expensive, but at least remove kill flags if they are in different
// BBs.
if (DefMI->getParent() != MI.getParent())
8 changes: 4 additions & 4 deletions llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -17,7 +17,7 @@
// For case 1, if a compressed register is available, then the uncompressed
// register is copied to the compressed register and its uses are replaced.
//
- // For example, storing zero uses the uncompressible zero register:
+ // For example, storing zero uses the incompressible zero register:
// sw zero, 0(a0) # if zero
// sw zero, 8(a0) # if zero
// sw zero, 4(a0) # if zero
@@ -275,7 +275,7 @@ static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) {
// rather than used.
//
// For stores, we can change SrcDest (and Base if SrcDest == Base) but
- // cannot resolve an uncompressible offset in this case.
+ // cannot resolve an incompressible offset in this case.
if (isCompressibleStore(MI)) {
if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
!NewBaseAdjust)
@@ -313,7 +313,7 @@ static Register analyzeCompressibleUses(MachineInstr &FirstMI,
// If RegImm.Reg is modified by this instruction, then we cannot optimize
// past this instruction. If the register is already compressed, then it may
// possible to optimize a large offset in the current instruction - this
- // will have been detected by the preceeding call to
+ // will have been detected by the preceding call to
// getRegImmPairPreventingCompression.
if (MI.modifiesRegister(RegImm.Reg, TRI))
break;
@@ -409,7 +409,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
for (MachineInstr &MI : MBB) {
// Determine if this instruction would otherwise be compressed if not for
- // an uncompressible register or offset.
+ // an incompressible register or offset.
RegImmPair RegImm = getRegImmPairPreventingCompression(MI);
if (!RegImm.Reg && RegImm.Imm == 0)
continue;
4 changes: 2 additions & 2 deletions llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -434,8 +434,8 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,

// Memory constraints have two operands.
if (NumOps != 2 || !Flags.isMemKind()) {
- // If the register is used by something other than a memory contraint,
- // we should not fold.
+ // If the register is used by something other than a memory
+ // constraint, we should not fold.
for (unsigned J = 0; J < NumOps; ++J) {
const MachineOperand &MO = UseMI.getOperand(I + 1 + J);
if (MO.isReg() && MO.getReg() == DestReg)
Expand Down