@@ -809,8 +809,7 @@ class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
809809};
810810} // end namespace llvm
811811
812- /// Look for a meaningful debug location on the instruction or it's
813- /// operands.
812+ /// Look for a meaningful debug location on the instruction or its operands.
814813static DebugLoc getDebugLocFromInstOrOperands (Instruction *I) {
815814 if (!I)
816815 return DebugLoc ();
@@ -1798,7 +1797,7 @@ class GeneratedRTChecks {
17981797
17991798 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
18001799 /// accurately estimate the cost of the runtime checks. The blocks are
1801- /// un-linked from the IR and is added back during vector code generation. If
1800+ /// un-linked from the IR and are added back during vector code generation. If
18021801 // / there is no vector code generation, the check blocks are removed
18031802 // / completely.
18041803 void create (Loop *L, const LoopAccessInfo &LAI,
@@ -2581,7 +2580,7 @@ PHINode *InnerLoopVectorizer::createInductionResumeValue(
25812580 }
25822581 }
25832582
2584- // Create phi nodes to merge from the backedge-taken check block.
2583+ // Create phi nodes to merge from the backedge-taken check block.
25852584 PHINode *BCResumeVal =
25862585 PHINode::Create (OrigPhi->getType (), 3 , " bc.resume.val" ,
25872586 LoopScalarPreHeader->getFirstNonPHIIt ());
@@ -3002,7 +3001,8 @@ void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
30023001
30033002 // We can't sink an instruction if it is a phi node, is not in the loop,
30043003 // may have side effects or may read from memory.
3005- // TODO Could dor more granular checking to allow sinking a load past non-store instructions.
3004+ // TODO: Could do more granular checking to allow sinking
3005+ // a load past non-store instructions.
30063006 if (!I || isa<PHINode>(I) || !VectorLoop->contains (I) ||
30073007 I->mayHaveSideEffects () || I->mayReadFromMemory ())
30083008 continue ;
@@ -3140,9 +3140,8 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
31403140
31413141 // (2) Add to the worklist all bitcast and getelementptr instructions used by
31423142 // memory accesses requiring a scalar use. The pointer operands of loads and
3143- // stores will be scalar as long as the memory accesses is not a gather or
3144- // scatter operation. The value operand of a store will remain scalar if the
3145- // store is scalarized.
3143+ // stores will be scalar unless the operation is a gather or scatter.
3144+ // The value operand of a store will remain scalar if the store is scalarized.
31463145 for (auto *BB : TheLoop->blocks ())
31473146 for (auto &I : *BB) {
31483147 if (auto *Load = dyn_cast<LoadInst>(&I)) {
@@ -3415,7 +3414,7 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
34153414 assert (Group && " Must have a group." );
34163415 unsigned InterleaveFactor = Group->getFactor ();
34173416
3418- // If the instruction's allocated size doesn't equal it's type size, it
3417+ // If the instruction's allocated size doesn't equal its type size, it
34193418 // requires padding and will be scalarized.
34203419 auto &DL = I->getDataLayout ();
34213420 auto *ScalarTy = getLoadStoreType (I);
@@ -3515,11 +3514,11 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
35153514 assert (VF.isVector () && !Uniforms.contains (VF) &&
35163515 " This function should not be visited twice for the same VF" );
35173516
3518- // Visit the list of Uniforms. If we'll not find any uniform value, we'll
3519- // not analyze again. Uniforms.count(VF) will return 1.
3517+ // Visit the list of Uniforms. If we find no uniform value, we won't
3518+ // analyze again. Uniforms.count(VF) will return 1.
35203519 Uniforms[VF].clear ();
35213520
3522- // We now know that the loop is vectorizable!
3521+ // Now we know that the loop is vectorizable!
35233522 // Collect instructions inside the loop that will remain uniform after
35243523 // vectorization.
35253524
@@ -3566,7 +3565,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
35663565
35673566 auto PrevVF = VF.divideCoefficientBy (2 );
35683567 // Return true if all lanes perform the same memory operation, and we can
3569- // thus chose to execute only one.
3568+ // thus choose to execute only one.
35703569 auto IsUniformMemOpUse = [&](Instruction *I) {
35713570 // If the value was already known to not be uniform for the previous
35723571 // (smaller VF), it cannot be uniform for the larger VF.
@@ -3957,7 +3956,7 @@ FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
39573956FixedScalableVFPair
39583957LoopVectorizationCostModel::computeMaxVF (ElementCount UserVF, unsigned UserIC) {
39593958 if (Legal->getRuntimePointerChecking ()->Need && TTI.hasBranchDivergence ()) {
3960- // TODO: It may by useful to do since it's still likely to be dynamically
3959+ // TODO: It may be useful to do since it's still likely to be dynamically
39613960 // uniform if the target can skip.
39623961 reportVectorizationFailure (
39633962 " Not inserting runtime ptr check for divergent target" ,
@@ -4031,7 +4030,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
40314030 assert (WideningDecisions.empty () && Uniforms.empty () && Scalars.empty () &&
40324031 " No decisions should have been taken at this point" );
40334032 // Note: There is no need to invalidate any cost modeling decisions here, as
4034- // non where taken so far.
4033+ // none were taken so far.
40354034 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue ();
40364035 }
40374036
@@ -7940,7 +7939,7 @@ EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
79407939 BasicBlock *Bypass, BasicBlock *Insert) {
79417940
79427941 assert (EPI.TripCount &&
7943- " Expected trip count to have been safed in the first pass." );
7942+ " Expected trip count to have been saved in the first pass." );
79447943 assert (
79457944 (!isa<Instruction>(EPI.TripCount ) ||
79467945 DT->dominates (cast<Instruction>(EPI.TripCount )->getParent (), Insert)) &&
0 commit comments