@@ -1251,8 +1251,8 @@ class LoopVectorizationCostModel {
       return false;
 
     // Get the source and destination types of the truncate.
-    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
-    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
+    Type *SrcTy = toVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
+    Type *DestTy = toVectorTy(cast<CastInst>(I)->getDestTy(), VF);
 
     // If the truncate is free for the given types, return false. Replacing a
     // free truncate with an induction variable would add an induction variable
@@ -3526,14 +3526,14 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
   }
   InstructionCost SafeDivisorCost = 0;
 
-  auto *VecTy = ToVectorTy(I->getType(), VF);
+  auto *VecTy = toVectorTy(I->getType(), VF);
 
   // The cost of the select guard to ensure all lanes are well defined
   // after we speculate above any internal control flow.
-  SafeDivisorCost += TTI.getCmpSelInstrCost(
-      Instruction::Select, VecTy,
-      ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
-      CmpInst::BAD_ICMP_PREDICATE, CostKind);
+  SafeDivisorCost +=
+      TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
+                             toVectorTy(Type::getInt1Ty(I->getContext()), VF),
+                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
 
   // Certain instructions can be cheaper to vectorize if they have a constant
   // second vector operand. One example of this are shifts on x86.
@@ -4654,7 +4654,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
   }
 
   auto WillWiden = [&TTI, VF](Type *ScalarTy) {
-    Type *VectorTy = ToVectorTy(ScalarTy, VF);
+    Type *VectorTy = toVectorTy(ScalarTy, VF);
     unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
     if (!NumLegalParts)
       return false;
@@ -5645,7 +5645,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
       ScalarCost += TTI.getScalarizationOverhead(
-          cast<VectorType>(ToVectorTy(I->getType(), VF)),
+          cast<VectorType>(toVectorTy(I->getType(), VF)),
           APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ true,
           /*Extract*/ false, CostKind);
       ScalarCost +=
@@ -5664,7 +5664,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
             Worklist.push_back(J);
           else if (needsExtract(J, VF)) {
             ScalarCost += TTI.getScalarizationOverhead(
-                cast<VectorType>(ToVectorTy(J->getType(), VF)),
+                cast<VectorType>(toVectorTy(J->getType(), VF)),
                 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
                 /*Extract*/ true, CostKind);
           }
@@ -5775,7 +5775,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
 
   unsigned AS = getLoadStoreAddressSpace(I);
   Value *Ptr = getLoadStorePointerOperand(I);
-  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
+  Type *PtrTy = toVectorTy(Ptr->getType(), VF);
   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
   //       that it is being called from this specific place.
 
@@ -5826,7 +5826,7 @@ InstructionCost
 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                     ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   Value *Ptr = getLoadStorePointerOperand(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
@@ -5858,7 +5858,7 @@ LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
   assert(Legal->isUniformMemOp(*I, VF));
 
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
@@ -5884,7 +5884,7 @@ InstructionCost
 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                  ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   const Value *Ptr = getLoadStorePointerOperand(I);
 
@@ -5902,7 +5902,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
 
   Instruction *InsertPos = Group->getInsertPos();
   Type *ValTy = getLoadStoreType(InsertPos);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -6147,7 +6147,7 @@ InstructionCost LoopVectorizationCostModel::getScalarizationOverhead(
     return 0;
 
   InstructionCost Cost = 0;
-  Type *RetTy = ToVectorTy(I->getType(), VF);
+  Type *RetTy = toVectorTy(I->getType(), VF);
   if (!RetTy->isVoidTy() &&
       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
     Cost += TTI.getScalarizationOverhead(
@@ -6413,9 +6413,9 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
 
     bool MaskRequired = Legal->isMaskRequired(CI);
     // Compute corresponding vector type for return value and arguments.
-    Type *RetTy = ToVectorTy(ScalarRetTy, VF);
+    Type *RetTy = toVectorTy(ScalarRetTy, VF);
     for (Type *ScalarTy : ScalarTys)
-      Tys.push_back(ToVectorTy(ScalarTy, VF));
+      Tys.push_back(toVectorTy(ScalarTy, VF));
 
     // An in-loop reduction using an fmuladd intrinsic is a special case;
     // we don't want the normal cost for that intrinsic.
@@ -6605,7 +6605,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
            HasSingleCopyAfterVectorization(I, VF));
     VectorTy = RetTy;
   } else
-    VectorTy = ToVectorTy(RetTy, VF);
+    VectorTy = toVectorTy(RetTy, VF);
 
   if (VF.isVector() && VectorTy->isVectorTy() &&
       !TTI.getNumberOfParts(VectorTy))
@@ -6665,8 +6665,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     return Switch->getNumCases() *
            TTI.getCmpSelInstrCost(
                Instruction::ICmp,
-               ToVectorTy(Switch->getCondition()->getType(), VF),
-               ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
+               toVectorTy(Switch->getCondition()->getType(), VF),
+               toVectorTy(Type::getInt1Ty(I->getContext()), VF),
                CmpInst::ICMP_EQ, CostKind);
   }
   case Instruction::PHI: {
@@ -6711,8 +6711,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     }
     return (Phi->getNumIncomingValues() - 1) *
            TTI.getCmpSelInstrCost(
-               Instruction::Select, ToVectorTy(ResultTy, VF),
-               ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
+               Instruction::Select, toVectorTy(ResultTy, VF),
+               toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                CmpInst::BAD_ICMP_PREDICATE, CostKind);
   }
 
@@ -6721,8 +6721,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     if (VF.isVector() && foldTailWithEVL() &&
         Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
       IntrinsicCostAttributes ICA(
-          Intrinsic::vp_merge, ToVectorTy(Phi->getType(), VF),
-          {ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
+          Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
+          {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
       return TTI.getIntrinsicInstrCost(ICA, CostKind);
     }
 
@@ -6862,7 +6862,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
     }
 
-    VectorTy = ToVectorTy(ValTy, VF);
+    VectorTy = toVectorTy(ValTy, VF);
     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                   cast<CmpInst>(I)->getPredicate(), CostKind,
                                   {TTI::OK_AnyValue, TTI::OP_None},
@@ -6880,7 +6880,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       if (Decision == CM_Scalarize)
         Width = ElementCount::getFixed(1);
     }
-    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
+    VectorTy = toVectorTy(getLoadStoreType(I), Width);
     return getMemoryInstructionCost(I, VF);
   }
   case Instruction::BitCast:
@@ -6961,7 +6961,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       SrcScalarTy =
           IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
     Type *SrcVecTy =
-        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
+        VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
       // If the result type is <= the source type, there will be no extend
@@ -7490,7 +7490,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
   // Pre-compute the cost for I, if it has a reduction pattern cost.
   for (Instruction *I : ChainOpsAndOperands) {
     auto ReductionCost = CM.getReductionPatternCost(
-        I, VF, ToVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
+        I, VF, toVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
     if (!ReductionCost)
       continue;
 
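
Note: the definition of toVectorTy is not part of this diff; every hunk above is a call site. For readers unfamiliar with the helper, the sketch below approximates its behavior as inferred from those call sites. It is not the verbatim upstream definition (which lives in LLVM's vector-type utility headers): it widens a scalar type to a vector type for the given element count, passing through cases where no widening makes sense.

#include "llvm/IR/DerivedTypes.h" // Type, VectorType, ElementCount
using namespace llvm;

// Approximate sketch of the renamed helper, not the exact upstream code.
inline Type *toVectorTy(Type *Scalar, ElementCount EC) {
  // No widening for void results (e.g. stores) or a scalar element count.
  if (Scalar->isVoidTy() || EC.isScalar())
    return Scalar;
  // E.g. i32 with a fixed count of 4 becomes <4 x i32>; with a scalable
  // count of 4 it becomes <vscale x 4 x i32>.
  return VectorType::get(Scalar, EC);
}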