@@ -5367,7 +5367,7 @@ static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
     return AArch64ISD::UMULL;
   } else if (VT == MVT::v2i64 && DAG.MaskedValueIsZero(N0, Mask) &&
              DAG.MaskedValueIsZero(N1, Mask)) {
-    // For v2i64 we look more aggresively at both operands being zero, to avoid
+    // For v2i64 we look more aggressively at both operands being zero, to avoid
     // scalarization.
     return AArch64ISD::UMULL;
   }
@@ -5844,7 +5844,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
       return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
     } else {
-      report_fatal_error("Unexpected type for AArch64 NEON intrinic");
+      report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
     }
   }
   case Intrinsic::aarch64_neon_pmull64: {
@@ -8630,9 +8630,9 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
   if (SizeInBits < 8)
     return false;

-  APInt RequredZero(SizeInBits, 0xFE);
+  APInt RequiredZero(SizeInBits, 0xFE);
   KnownBits Bits = DAG.computeKnownBits(Arg, 4);
-  bool ZExtBool = (Bits.Zero & RequredZero) == RequredZero;
+  bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
   return ZExtBool;
 }

@@ -13536,7 +13536,7 @@ static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
       OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
     } else {
       assert(VT.getScalarSizeInBits() == 32 &&
-             "Expected 16 or 32 bit shuffle elemements");
+             "Expected 16 or 32 bit shuffle elements");
       Input = DAG.getBitcast(MVT::v2f64, Input);
       OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
     }
@@ -13941,7 +13941,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
       unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
       MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
       V1 = DAG.getBitcast(NewVecTy, V1);
-      // Constuct the DUP instruction
+      // Construct the DUP instruction
       V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
       // Cast back to the original type
       return DAG.getBitcast(VT, V1);
@@ -16900,12 +16900,12 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
 }

 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
-                                          Align &RequiredAligment) const {
+                                          Align &RequiredAlignment) const {
   if (!LoadedType.isSimple() ||
       (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
     return false;
   // Cyclone supports unaligned accesses.
-  RequiredAligment = Align(1);
+  RequiredAlignment = Align(1);
   unsigned NumBits = LoadedType.getSizeInBits();
   return NumBits == 32 || NumBits == 64;
 }
@@ -18028,7 +18028,7 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
       EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
     return SDValue();

-  // Pattern is dectected. Let's convert it to sequence of nodes.
+  // Pattern is detected. Let's convert it to sequence of nodes.
   SDLoc DL(N);

   // First, create the node pattern of UABD/SABD.
@@ -18246,10 +18246,10 @@ static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
                           DAG.getConstant(I * 16, DL, MVT::i64));
   SDValue Dot =
       DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, Vec8Op0, Vec8Op1);
-  SDValue VecReudceAdd8 =
+  SDValue VecReduceAdd8 =
       DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
   return DAG.getNode(ISD::ADD, DL, N->getValueType(0), VecReduceAdd16,
-                     VecReudceAdd8);
+                     VecReduceAdd8);
 }

 // Given an (integer) vecreduce, we know the order of the inputs does not
@@ -21474,7 +21474,7 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
   case Intrinsic::aarch64_neon_ushl:
     // For positive shift amounts we can use SHL, as ushl/sshl perform a regular
     // left shift for positive shift amounts. For negative shifts we can use a
-    // VASHR/VLSHR as appropiate.
+    // VASHR/VLSHR as appropriate.
     if (ShiftAmount < 0) {
       Opcode = IID == Intrinsic::aarch64_neon_sshl ? AArch64ISD::VASHR
                                                    : AArch64ISD::VLSHR;
@@ -22880,7 +22880,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
 }

 static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
-  assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexepected Opcode!");
+  assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");

   // splice(pg, op1, undef) -> op1
   if (N->getOperand(2).isUndef())
@@ -23616,10 +23616,10 @@ static SDValue performLOADCombine(SDNode *N,
                              LD->getMemOperand()->getFlags(), LD->getAAInfo());
     SDValue UndefVector = DAG.getUNDEF(NewVT);
     SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL);
-    SDValue ExtendedReminingLoad =
+    SDValue ExtendedRemainingLoad =
         DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT,
                     {UndefVector, RemainingLoad, InsertIdx});
-    LoadOps.push_back(ExtendedReminingLoad);
+    LoadOps.push_back(ExtendedRemainingLoad);
     LoadOpsChain.push_back(SDValue(cast<SDNode>(RemainingLoad), 1));
     EVT ConcatVT =
         EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),