Commit 538e112

[LV] Rename ToVectorTy to toVectorTy (NFC)
This is for consistency with other helpers (and also follows the LLVM naming conventions).
1 parent 0b4ee8d · commit 538e112

File tree

3 files changed: +51 −51 lines changed

llvm/include/llvm/IR/VectorTypeUtils.h

Lines changed: 5 additions & 5 deletions

@@ -16,14 +16,14 @@ namespace llvm {
 /// A helper function for converting Scalar types to vector types. If
 /// the incoming type is void, we return void. If the EC represents a
 /// scalar, we return the scalar type.
-inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
+inline Type *toVectorTy(Type *Scalar, ElementCount EC) {
   if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
     return Scalar;
   return VectorType::get(Scalar, EC);
 }
 
-inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
-  return ToVectorTy(Scalar, ElementCount::getFixed(VF));
+inline Type *toVectorTy(Type *Scalar, unsigned VF) {
+  return toVectorTy(Scalar, ElementCount::getFixed(VF));
 }
 
 /// A helper for converting structs of scalar types to structs of vector types.
@@ -41,7 +41,7 @@ Type *toScalarizedStructTy(StructType *StructTy);
 bool isVectorizedStructTy(StructType *StructTy);
 
 /// A helper for converting to vectorized types. For scalar types, this is
-/// equivalent to calling `ToVectorTy`. For struct types, this returns a new
+/// equivalent to calling `toVectorTy`. For struct types, this returns a new
 /// struct where each element type has been widened to a vector type.
 /// Note:
 /// - If the incoming type is void, we return void
@@ -50,7 +50,7 @@ bool isVectorizedStructTy(StructType *StructTy);
 inline Type *toVectorizedTy(Type *Ty, ElementCount EC) {
   if (StructType *StructTy = dyn_cast<StructType>(Ty))
     return toVectorizedStructTy(StructTy, EC);
-  return ToVectorTy(Ty, EC);
+  return toVectorTy(Ty, EC);
 }
 
 /// A helper for converting vectorized types to scalarized (non-vector) types.
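For context, the renamed helper simply wraps a scalar type into a VectorType for a given element count, passing void types and scalar element counts through unchanged. The stand-alone sketch below is illustrative only and is not part of this commit; it assumes a checkout where llvm/IR/VectorTypeUtils.h is available, as in this tree, and follows directly from the definition above.

// Minimal usage sketch for the renamed helper (illustrative, not part of
// this commit). Assumes llvm/IR/VectorTypeUtils.h from this tree.
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/VectorTypeUtils.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;

  // A fixed VF widens the scalar to a fixed-width vector: <4 x float>.
  toVectorTy(Type::getFloatTy(Ctx), 4)->print(errs());
  errs() << "\n";

  // A scalable ElementCount yields a scalable vector: <vscale x 2 x i64>.
  toVectorTy(Type::getInt64Ty(Ctx), ElementCount::getScalable(2))->print(errs());
  errs() << "\n";

  // Void (and any type with a scalar ElementCount) is returned unchanged.
  toVectorTy(Type::getVoidTy(Ctx), 4)->print(errs());
  errs() << "\n";
  return 0;
}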

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 29 additions & 29 deletions

@@ -1251,8 +1251,8 @@ class LoopVectorizationCostModel {
       return false;
 
     // Get the source and destination types of the truncate.
-    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
-    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
+    Type *SrcTy = toVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
+    Type *DestTy = toVectorTy(cast<CastInst>(I)->getDestTy(), VF);
 
     // If the truncate is free for the given types, return false. Replacing a
     // free truncate with an induction variable would add an induction variable
@@ -3526,14 +3526,14 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
   }
   InstructionCost SafeDivisorCost = 0;
 
-  auto *VecTy = ToVectorTy(I->getType(), VF);
+  auto *VecTy = toVectorTy(I->getType(), VF);
 
   // The cost of the select guard to ensure all lanes are well defined
   // after we speculate above any internal control flow.
-  SafeDivisorCost += TTI.getCmpSelInstrCost(
-      Instruction::Select, VecTy,
-      ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
-      CmpInst::BAD_ICMP_PREDICATE, CostKind);
+  SafeDivisorCost +=
+      TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
+                             toVectorTy(Type::getInt1Ty(I->getContext()), VF),
+                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
 
   // Certain instructions can be cheaper to vectorize if they have a constant
   // second vector operand. One example of this are shifts on x86.
@@ -4654,7 +4654,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
      }
 
      auto WillWiden = [&TTI, VF](Type *ScalarTy) {
-       Type *VectorTy = ToVectorTy(ScalarTy, VF);
+       Type *VectorTy = toVectorTy(ScalarTy, VF);
        unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
        if (!NumLegalParts)
          return false;
@@ -5645,7 +5645,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
       ScalarCost += TTI.getScalarizationOverhead(
-          cast<VectorType>(ToVectorTy(I->getType(), VF)),
+          cast<VectorType>(toVectorTy(I->getType(), VF)),
           APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ true,
           /*Extract*/ false, CostKind);
       ScalarCost +=
@@ -5664,7 +5664,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
           Worklist.push_back(J);
         else if (needsExtract(J, VF)) {
           ScalarCost += TTI.getScalarizationOverhead(
-              cast<VectorType>(ToVectorTy(J->getType(), VF)),
+              cast<VectorType>(toVectorTy(J->getType(), VF)),
               APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
               /*Extract*/ true, CostKind);
         }
@@ -5775,7 +5775,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
 
   unsigned AS = getLoadStoreAddressSpace(I);
   Value *Ptr = getLoadStorePointerOperand(I);
-  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
+  Type *PtrTy = toVectorTy(Ptr->getType(), VF);
   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
   //       that it is being called from this specific place.
 
@@ -5826,7 +5826,7 @@ InstructionCost
 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                     ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   Value *Ptr = getLoadStorePointerOperand(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
@@ -5858,7 +5858,7 @@ LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
   assert(Legal->isUniformMemOp(*I, VF));
 
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
@@ -5884,7 +5884,7 @@ InstructionCost
 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                  ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   const Value *Ptr = getLoadStorePointerOperand(I);
 
@@ -5902,7 +5902,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
 
   Instruction *InsertPos = Group->getInsertPos();
   Type *ValTy = getLoadStoreType(InsertPos);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -6147,7 +6147,7 @@ InstructionCost LoopVectorizationCostModel::getScalarizationOverhead(
     return 0;
 
   InstructionCost Cost = 0;
-  Type *RetTy = ToVectorTy(I->getType(), VF);
+  Type *RetTy = toVectorTy(I->getType(), VF);
   if (!RetTy->isVoidTy() &&
       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
     Cost += TTI.getScalarizationOverhead(
@@ -6413,9 +6413,9 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
 
       bool MaskRequired = Legal->isMaskRequired(CI);
       // Compute corresponding vector type for return value and arguments.
-      Type *RetTy = ToVectorTy(ScalarRetTy, VF);
+      Type *RetTy = toVectorTy(ScalarRetTy, VF);
       for (Type *ScalarTy : ScalarTys)
-        Tys.push_back(ToVectorTy(ScalarTy, VF));
+        Tys.push_back(toVectorTy(ScalarTy, VF));
 
       // An in-loop reduction using an fmuladd intrinsic is a special case;
       // we don't want the normal cost for that intrinsic.
@@ -6605,7 +6605,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
            HasSingleCopyAfterVectorization(I, VF));
     VectorTy = RetTy;
   } else
-    VectorTy = ToVectorTy(RetTy, VF);
+    VectorTy = toVectorTy(RetTy, VF);
 
   if (VF.isVector() && VectorTy->isVectorTy() &&
       !TTI.getNumberOfParts(VectorTy))
@@ -6665,8 +6665,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     return Switch->getNumCases() *
            TTI.getCmpSelInstrCost(
                Instruction::ICmp,
-               ToVectorTy(Switch->getCondition()->getType(), VF),
-               ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
+               toVectorTy(Switch->getCondition()->getType(), VF),
+               toVectorTy(Type::getInt1Ty(I->getContext()), VF),
                CmpInst::ICMP_EQ, CostKind);
   }
   case Instruction::PHI: {
@@ -6711,8 +6711,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     }
     return (Phi->getNumIncomingValues() - 1) *
            TTI.getCmpSelInstrCost(
-               Instruction::Select, ToVectorTy(ResultTy, VF),
-               ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
+               Instruction::Select, toVectorTy(ResultTy, VF),
+               toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                CmpInst::BAD_ICMP_PREDICATE, CostKind);
   }
 
@@ -6721,8 +6721,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     if (VF.isVector() && foldTailWithEVL() &&
         Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
       IntrinsicCostAttributes ICA(
-          Intrinsic::vp_merge, ToVectorTy(Phi->getType(), VF),
-          {ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
+          Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
+          {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
       return TTI.getIntrinsicInstrCost(ICA, CostKind);
     }
 
@@ -6862,7 +6862,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
     }
 
-    VectorTy = ToVectorTy(ValTy, VF);
+    VectorTy = toVectorTy(ValTy, VF);
     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                   cast<CmpInst>(I)->getPredicate(), CostKind,
                                   {TTI::OK_AnyValue, TTI::OP_None},
@@ -6880,7 +6880,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       if (Decision == CM_Scalarize)
         Width = ElementCount::getFixed(1);
     }
-    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
+    VectorTy = toVectorTy(getLoadStoreType(I), Width);
     return getMemoryInstructionCost(I, VF);
   }
   case Instruction::BitCast:
@@ -6961,7 +6961,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       SrcScalarTy =
           IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
     Type *SrcVecTy =
-        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
+        VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
       // If the result type is <= the source type, there will be no extend
@@ -7490,7 +7490,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
   // Pre-compute the cost for I, if it has a reduction pattern cost.
   for (Instruction *I : ChainOpsAndOperands) {
     auto ReductionCost = CM.getReductionPatternCost(
-        I, VF, ToVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
+        I, VF, toVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
     if (!ReductionCost)
       continue;
 
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 17 additions & 17 deletions

@@ -1030,11 +1030,11 @@ InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
     Arguments.push_back(V);
   }
 
-  Type *RetTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *RetTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
   SmallVector<Type *> ParamTys;
   for (unsigned I = 0; I != getNumOperands(); ++I)
     ParamTys.push_back(
-        ToVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
+        toVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
 
   // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
   FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
@@ -1202,7 +1202,7 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
   SelectInst *SI = cast<SelectInst>(getUnderlyingValue());
   bool ScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
   Type *ScalarTy = Ctx.Types.inferScalarType(this);
-  Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
   VPValue *Op0, *Op1;
@@ -1383,7 +1383,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   switch (Opcode) {
   case Instruction::FNeg: {
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     return Ctx.TTI.getArithmeticInstrCost(
         Opcode, VectorTy, CostKind,
         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
@@ -1421,7 +1421,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
     if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
         getOperand(1)->isDefinedOutsideLoopRegions())
       RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
 
     SmallVector<const Value *, 4> Operands;
@@ -1434,13 +1434,13 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
   }
   case Instruction::Freeze: {
     // This opcode is unknown. Assume that it is the same as 'mul'.
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
   }
   case Instruction::ICmp:
   case Instruction::FCmp: {
     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
     return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
                                       CostKind,
                                       {TTI::OK_AnyValue, TTI::OP_None},
@@ -1568,8 +1568,8 @@ InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
   }
 
   auto *SrcTy =
-      cast<VectorType>(ToVectorTy(Ctx.Types.inferScalarType(Operand), VF));
-  auto *DestTy = cast<VectorType>(ToVectorTy(getResultType(), VF));
+      cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(Operand), VF));
+  auto *DestTy = cast<VectorType>(toVectorTy(getResultType(), VF));
   // Arm TTI will use the underlying instruction to determine the cost.
   return Ctx.TTI.getCastInstrCost(
       Opcode, DestTy, SrcTy, CCH, TTI::TCK_RecipThroughput,
@@ -2077,8 +2077,8 @@ InstructionCost VPBlendRecipe::computeCost(ElementCount VF,
   if (vputils::onlyFirstLaneUsed(this))
     return Ctx.TTI.getCFInstrCost(Instruction::PHI, CostKind);
 
-  Type *ResultTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
-  Type *CmpTy = ToVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
+  Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
   return (getNumIncomingValues() - 1) *
          Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
                                     CmpInst::BAD_ICMP_PREDICATE, CostKind);
@@ -2199,7 +2199,7 @@ InstructionCost VPReductionRecipe::computeCost(ElementCount VF,
                                                VPCostContext &Ctx) const {
   RecurKind RdxKind = RdxDesc.getRecurrenceKind();
   Type *ElementTy = Ctx.Types.inferScalarType(this);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ElementTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   unsigned Opcode = RdxDesc.getOpcode();
 
@@ -2450,7 +2450,7 @@ void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
 
 InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
                                                  VPCostContext &Ctx) const {
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -2597,7 +2597,7 @@ InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
   // legacy model, it will always calculate the cost of mask.
   // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
   // don't need to compare to the legacy cost model.
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -2718,7 +2718,7 @@ InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
   // legacy model, it will always calculate the cost of mask.
   // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
   // don't need to compare to the legacy cost model.
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -3086,7 +3086,7 @@ InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF,
   Type *ValTy = Ctx.Types.inferScalarType(
       getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
                                 : getStoredValues()[InsertPosIdx]);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -3347,7 +3347,7 @@ VPFirstOrderRecurrencePHIRecipe::computeCost(ElementCount VF,
   SmallVector<int> Mask(VF.getKnownMinValue());
   std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
   Type *VectorTy =
-      ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
+      toVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
 
   return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
                                 cast<VectorType>(VectorTy), Mask, CostKind,
