diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index f6ec21caa4d72..b42223eda9922 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -84,12 +84,13 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   using TTI = TargetTransformInfo;
 
   /// Helper function to access this as a T.
-  T *thisT() { return static_cast<T *>(this); }
+  const T *thisT() const { return static_cast<const T *>(this); }
 
   /// Estimate a cost of Broadcast as an extract and sequence of insert
   /// operations.
-  InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy,
-                                              TTI::TargetCostKind CostKind) {
+  InstructionCost
+  getBroadcastShuffleOverhead(FixedVectorType *VTy,
+                              TTI::TargetCostKind CostKind) const {
     InstructionCost Cost = 0;
     // Broadcast cost is equal to the cost of extracting the zero'th element
     // plus the cost of inserting it into every element of the result vector.
@@ -105,8 +106,9 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   /// Estimate a cost of shuffle as a sequence of extract and insert
   /// operations.
-  InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy,
-                                            TTI::TargetCostKind CostKind) {
+  InstructionCost
+  getPermuteShuffleOverhead(FixedVectorType *VTy,
+                            TTI::TargetCostKind CostKind) const {
     InstructionCost Cost = 0;
     // Shuffle cost is equal to the cost of extracting element from its argument
     // plus the cost of inserting them onto the result vector.
@@ -129,7 +131,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
                                               TTI::TargetCostKind CostKind,
                                               int Index,
-                                              FixedVectorType *SubVTy) {
+                                              FixedVectorType *SubVTy) const {
     assert(VTy && SubVTy && "Can only extract subvectors from vectors");
     int NumSubElts = SubVTy->getNumElements();
@@ -157,7 +159,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
                                              TTI::TargetCostKind CostKind,
                                              int Index,
-                                             FixedVectorType *SubVTy) {
+                                             FixedVectorType *SubVTy) const {
     assert(VTy && SubVTy && "Can only insert subvectors into vectors");
     int NumSubElts = SubVTy->getNumElements();
@@ -211,7 +213,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
                                             bool VariableMask,
                                             bool IsGatherScatter,
                                             TTI::TargetCostKind CostKind,
-                                            unsigned AddressSpace = 0) {
+                                            unsigned AddressSpace = 0) const {
     // We cannot scalarize scalable vectors, so return Invalid.
     if (isa<ScalableVectorType>(DataTy))
       return InstructionCost::getInvalid();
@@ -299,7 +301,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// (e.g. scalarization).
   std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
       const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
-      RTLIB::Libcall LC, std::optional<unsigned> CallRetElementIndex = {}) {
+      RTLIB::Libcall LC,
+      std::optional<unsigned> CallRetElementIndex = {}) const {
     Type *RetTy = ICA.getReturnType();
     // Vector variants of the intrinsic can be mapped to a vector library call.
     auto const *LibInfo = ICA.getLibInfo();
@@ -866,7 +869,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
                                            const APInt &DemandedElts,
                                            bool Insert, bool Extract,
                                            TTI::TargetCostKind CostKind,
-                                           ArrayRef<Value *> VL = {}) {
+                                           ArrayRef<Value *> VL = {}) const {
     /// FIXME: a bitfield is not a reasonable abstraction for talking about
     /// which elements are needed from a scalable vector
     if (isa<ScalableVectorType>(InTy))
@@ -917,7 +920,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
   InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
                                            bool Extract,
-                                           TTI::TargetCostKind CostKind) {
+                                           TTI::TargetCostKind CostKind) const {
     if (isa<ScalableVectorType>(InTy))
       return InstructionCost::getInvalid();
     auto *Ty = cast<FixedVectorType>(InTy);
@@ -933,7 +936,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost
   getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                    ArrayRef<Type *> Tys,
-                                   TTI::TargetCostKind CostKind) {
+                                   TTI::TargetCostKind CostKind) const {
     assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
 
     InstructionCost Cost = 0;
@@ -963,7 +966,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getScalarizationOverhead(VectorType *RetTy,
                                            ArrayRef<const Value *> Args,
                                            ArrayRef<Type *> Tys,
-                                           TTI::TargetCostKind CostKind) {
+                                           TTI::TargetCostKind CostKind) const {
     InstructionCost Cost = getScalarizationOverhead(
         RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
     if (!Args.empty())
@@ -1018,7 +1021,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr) {
+      ArrayRef<const Value *> Args = {},
+      const Instruction *CxtI = nullptr) const {
     // Check if any of the operands are vector operands.
     const TargetLoweringBase *TLI = getTLI();
     int ISD = TLI->InstructionOpcodeToISD(Opcode);
@@ -1147,7 +1151,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
                                  TTI::TargetCostKind CostKind, int Index,
                                  VectorType *SubTp,
                                  ArrayRef<const Value *> Args = {},
-                                 const Instruction *CxtI = nullptr) {
+                                 const Instruction *CxtI = nullptr) const {
     switch (improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp)) {
     case TTI::SK_Broadcast:
       if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
@@ -1175,7 +1179,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                    TTI::CastContextHint CCH,
                                    TTI::TargetCostKind CostKind,
-                                   const Instruction *I = nullptr) {
+                                   const Instruction *I = nullptr) const {
     if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
       return 0;
 
@@ -1289,7 +1293,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
         DstVTy->getElementCount().isVector()) {
       Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
       Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
-      T *TTI = static_cast<T *>(this);
+      const T *TTI = thisT();
       // If both types need to be split then the split is free.
       InstructionCost SplitCost =
           (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
@@ -1342,7 +1346,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
-                                 const Instruction *I = nullptr) {
+                                 const Instruction *I = nullptr) const {
     return BaseT::getCFInstrCost(Opcode, CostKind, I);
   }
 
@@ -1351,7 +1355,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
       TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      const Instruction *I = nullptr) {
+      const Instruction *I = nullptr) const {
     const TargetLoweringBase *TLI = getTLI();
     int ISD = TLI->InstructionOpcodeToISD(Opcode);
     assert(ISD && "Invalid opcode");
@@ -1401,7 +1405,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                      TTI::TargetCostKind CostKind,
-                                     unsigned Index, Value *Op0, Value *Op1) {
+                                     unsigned Index, Value *Op0,
+                                     Value *Op1) const {
     return getRegUsageForType(Val->getScalarType());
   }
@@ -1430,10 +1435,10 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
                                        Op1);
   }
 
-  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
-                                            int VF,
-                                            const APInt &DemandedDstElts,
-                                            TTI::TargetCostKind CostKind) {
+  InstructionCost
+  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
+                            const APInt &DemandedDstElts,
+                            TTI::TargetCostKind CostKind) const {
     assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
            "Unexpected size of DemandedDstElts.");
@@ -1463,11 +1468,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return Cost;
   }
 
-  InstructionCost
-  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
-                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
-                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
-                  const Instruction *I = nullptr) {
+  InstructionCost getMemoryOpCost(
+      unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+      TTI::TargetCostKind CostKind,
+      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
+      const Instruction *I = nullptr) const {
     assert(!Src->isVoidTy() && "Invalid type");
     // Assume types, such as structs, are expensive.
     if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
@@ -1510,7 +1515,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                         Align Alignment, unsigned AddressSpace,
-                                        TTI::TargetCostKind CostKind) {
+                                        TTI::TargetCostKind CostKind) const {
     // TODO: Pass on AddressSpace when we have test coverage.
     return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
                                        CostKind);
@@ -1520,14 +1525,14 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
                                          const Value *Ptr, bool VariableMask,
                                          Align Alignment,
                                          TTI::TargetCostKind CostKind,
-                                         const Instruction *I = nullptr) {
+                                         const Instruction *I = nullptr) const {
     return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                        true, CostKind);
   }
 
   InstructionCost getExpandCompressMemoryOpCost(
       unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment,
-      TTI::TargetCostKind CostKind, const Instruction *I = nullptr) {
+      TTI::TargetCostKind CostKind, const Instruction *I = nullptr) const {
     // Treat expand load/compress store as gather/scatter operation.
     // TODO: implement more precise cost estimation for these intrinsics.
     return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
@@ -1538,7 +1543,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
                                          const Value *Ptr, bool VariableMask,
                                          Align Alignment,
                                          TTI::TargetCostKind CostKind,
-                                         const Instruction *I) {
+                                         const Instruction *I) const {
     // For a target without strided memory operations (or for an illegal
     // operation type on one which does), assume we lower to a gather/scatter
     // operation. (Which may in turn be scalarized.)
@@ -1691,7 +1696,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   /// Get intrinsic cost based on arguments.
   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                        TTI::TargetCostKind CostKind) {
+                                        TTI::TargetCostKind CostKind) const {
     // Check for generically free intrinsics.
     if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
       return 0;
@@ -2095,7 +2100,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// based on types.
   InstructionCost
   getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                 TTI::TargetCostKind CostKind) {
+                                 TTI::TargetCostKind CostKind) const {
     Intrinsic::ID IID = ICA.getID();
     Type *RetTy = ICA.getReturnType();
     const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
@@ -2859,11 +2864,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// \returns The cost of Call instruction.
   InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                    ArrayRef<Type *> Tys,
-                                   TTI::TargetCostKind CostKind) {
+                                   TTI::TargetCostKind CostKind) const {
     return 10;
   }
 
-  unsigned getNumberOfParts(Type *Tp) {
+  unsigned getNumberOfParts(Type *Tp) const {
     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
     if (!LT.first.isValid())
       return 0;
@@ -2907,7 +2912,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// The cost model should take into account that the actual length of the
   /// vector is reduced on each iteration.
   InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
-                                       TTI::TargetCostKind CostKind) {
+                                       TTI::TargetCostKind CostKind) const {
     // Targets must implement a default value for the scalable case, since
     // we don't know how many lanes the vector has.
     if (isa<ScalableVectorType>(Ty))
@@ -2983,7 +2988,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// fixed-width vectors here because for scalable vectors we do not know the
   /// runtime number of operations.
   InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
-                                          TTI::TargetCostKind CostKind) {
+                                          TTI::TargetCostKind CostKind) const {
     // Targets must implement a default value for the scalable case, since
     // we don't know how many lanes the vector has.
     if (isa<ScalableVectorType>(Ty))
@@ -2999,9 +3004,10 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return ExtractCost + ArithCost;
   }
 
-  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
-                                             std::optional<FastMathFlags> FMF,
-                                             TTI::TargetCostKind CostKind) {
+  InstructionCost
+  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
+                             std::optional<FastMathFlags> FMF,
+                             TTI::TargetCostKind CostKind) const {
     assert(Ty && "Unknown reduction vector type");
     if (TTI::requiresOrderedReduction(FMF))
       return getOrderedReductionCost(Opcode, Ty, CostKind);
@@ -3012,7 +3018,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   /// \param CondTy Conditional type for the Select instruction.
   InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                          FastMathFlags FMF,
-                                         TTI::TargetCostKind CostKind) {
+                                         TTI::TargetCostKind CostKind) const {
     // Targets must implement a default value for the scalable case, since
     // we don't know how many lanes the vector has.
     if (isa<ScalableVectorType>(Ty))
@@ -3106,7 +3112,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return RedCost + MulCost + 2 * ExtCost;
   }
 
-  InstructionCost getVectorSplitCost() { return 1; }
+  InstructionCost getVectorSplitCost() const { return 1; }
 
   /// @}
 };
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 372b70a4b2d64..7a5a2f700a97e 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -577,7 +577,7 @@ static InstructionCost getHistogramCost(const IntrinsicCostAttributes &ICA) {
 InstructionCost
 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                      TTI::TargetCostKind CostKind) {
+                                      TTI::TargetCostKind CostKind) const {
   // The code-generator is currently not able to handle scalable vectors
   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
   // it. This change will be removed when code-generation for these types is
@@ -2806,7 +2806,7 @@ AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
 
 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                            ArrayRef<const Value *> Args,
-                                           Type *SrcOverrideTy) {
+                                           Type *SrcOverrideTy) const {
   // A helper that returns a vector type from the given type. The number of
   // elements in type Ty determines the vector width.
   auto toVectorTy = [&](Type *ArgTy) {
@@ -2903,7 +2903,7 @@ bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
 //   trunc i16 (lshr (add %x, %y), 1) -> i8
 //
 bool AArch64TTIImpl::isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
-                                        Type *Src) {
+                                        Type *Src) const {
   // The source should be a legal vector type.
   if (!Src->isVectorTy() || !TLI->isTypeLegal(TLI->getValueType(DL, Src)) ||
       (Src->isScalableTy() && !ST->hasSVE2()))
@@ -2948,7 +2948,7 @@ InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                                  Type *Src,
                                                  TTI::CastContextHint CCH,
                                                  TTI::TargetCostKind CostKind,
-                                                 const Instruction *I) {
+                                                 const Instruction *I) const {
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Invalid opcode");
   // If the cast is observable, and it is used by a widening instruction (e.g.,
@@ -3619,7 +3619,7 @@ InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
 
 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
                                                TTI::TargetCostKind CostKind,
-                                               const Instruction *I) {
+                                               const Instruction *I) const {
   if (CostKind != TTI::TCK_RecipThroughput)
     return Opcode == Instruction::PHI ? 0 : 1;
   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
@@ -3630,7 +3630,7 @@ InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
 InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(
     unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
     bool HasRealUse, const Instruction *I, Value *Scalar,
-    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) {
+    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
   assert(Val->isVectorTy() && "This must be a vector type");
 
   if (Index != -1U) {
@@ -3802,7 +3802,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(
 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                    TTI::TargetCostKind CostKind,
                                                    unsigned Index, Value *Op0,
-                                                   Value *Op1) {
+                                                   Value *Op1) const {
   bool HasRealUse =
       Opcode == Instruction::InsertElement && Op0 && !isa<UndefValue>(Op0);
   return getVectorInstrCostHelper(Opcode, Val, CostKind, Index, HasRealUse);
@@ -3826,7 +3826,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,
 
 InstructionCost AArch64TTIImpl::getScalarizationOverhead(
     VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
-    TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) {
+    TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
   if (isa<ScalableVectorType>(Ty))
     return InstructionCost::getInvalid();
   if (Ty->getElementType()->isFloatingPointTy())
@@ -3840,8 +3839,7 @@ InstructionCost AArch64TTIImpl::getScalarizationOverhead(
 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
-    ArrayRef<const Value *> Args,
-    const Instruction *CxtI) {
+    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
 
   // The code-generator is currently not able to handle scalable vectors
   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
@@ -4171,7 +4170,7 @@ InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(
     unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
     TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
-    TTI::OperandValueInfo Op2Info, const Instruction *I) {
+    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
   // TODO: Handle other cost kinds.
   if (CostKind != TTI::TCK_RecipThroughput)
     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
@@ -4284,7 +4283,7 @@ bool AArch64TTIImpl::prefersVectorizedAddressing() const {
 InstructionCost
 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                       Align Alignment, unsigned AddressSpace,
-                                      TTI::TargetCostKind CostKind) {
+                                      TTI::TargetCostKind CostKind) const {
   if (useNeonVector(Src))
     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                         CostKind);
@@ -4331,7 +4330,7 @@ static unsigned getSVEGatherScatterOverhead(unsigned Opcode,
 
 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
-    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
+    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
   if (useNeonVector(DataTy) || !isLegalMaskedGatherScatter(DataTy))
     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                          Alignment, CostKind, I);
@@ -4371,7 +4370,7 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                                 unsigned AddressSpace,
                                                 TTI::TargetCostKind CostKind,
                                                 TTI::OperandValueInfo OpInfo,
-                                                const Instruction *I) {
+                                                const Instruction *I) const {
   EVT VT = TLI->getValueType(DL, Ty, true);
   // Type legalization can't handle structs
   if (VT == MVT::Other)
@@ -4980,7 +4979,7 @@ bool AArch64TTIImpl::isLegalToVectorizeReduction(
 InstructionCost
 AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                        FastMathFlags FMF,
-                                       TTI::TargetCostKind CostKind) {
+                                       TTI::TargetCostKind CostKind) const {
   // The code-generator is currently not able to handle scalable vectors
   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
   // it. This change will be removed when code-generation for these types is
@@ -5005,7 +5004,7 @@ AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
 }
 
 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
-    unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
+    unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) const {
   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
   InstructionCost LegalizationCost = 0;
   if (LT.first > 1) {
@@ -5032,7 +5031,7 @@ InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
 InstructionCost
 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                            std::optional<FastMathFlags> FMF,
-                                           TTI::TargetCostKind CostKind) {
+                                           TTI::TargetCostKind CostKind) const {
   // The code-generator is currently not able to handle scalable vectors
   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
   // it. This change will be removed when code-generation for these types is
@@ -5207,8 +5206,9 @@ AArch64TTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
   return BaseT::getMulAccReductionCost(IsUnsigned, ResTy, VecTy, CostKind);
 }
 
-InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index,
-                                              TTI::TargetCostKind CostKind) {
+InstructionCost
+AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index,
+                              TTI::TargetCostKind CostKind) const {
   static const CostTblEntry ShuffleTbl[] = {
       { TTI::SK_Splice, MVT::nxv16i8,  1 },
       { TTI::SK_Splice, MVT::nxv8i16,  1 },
@@ -5340,7 +5340,7 @@ InstructionCost AArch64TTIImpl::getPartialReductionCost(
 InstructionCost AArch64TTIImpl::getShuffleCost(
     TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask,
     TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
-    ArrayRef<const Value *> Args, const Instruction *CxtI) {
+    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
 
   // If we have a Mask, and the LT is being legalized somehow, split the Mask
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 372ec22bd548f..a2e766676d3ed 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -61,7 +61,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
 
   bool isWideningInstruction(Type *DstTy, unsigned Opcode,
                              ArrayRef<const Value *> Args,
-                             Type *SrcOverrideTy = nullptr);
+                             Type *SrcOverrideTy = nullptr) const;
 
   // A helper function called by 'getVectorInstrCost'.
   //
@@ -75,7 +75,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   InstructionCost getVectorInstrCostHelper(
       unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
       bool HasRealUse, const Instruction *I = nullptr, Value *Scalar = nullptr,
-      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx = {});
+      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx = {}) const;
 
 public:
   explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
@@ -131,7 +131,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   }
 
   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                        TTI::TargetCostKind CostKind);
+                                        TTI::TargetCostKind CostKind) const;
 
   std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                     IntrinsicInst &II) const;
@@ -173,30 +173,32 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                         Align Alignment, unsigned AddressSpace,
-                                        TTI::TargetCostKind CostKind);
+                                        TTI::TargetCostKind CostKind) const;
 
   InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                          const Value *Ptr, bool VariableMask,
                                          Align Alignment,
                                          TTI::TargetCostKind CostKind,
-                                         const Instruction *I = nullptr);
+                                         const Instruction *I = nullptr) const;
 
-  bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst, Type *Src);
+  bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
+                          Type *Src) const;
 
   InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                    TTI::CastContextHint CCH,
                                    TTI::TargetCostKind CostKind,
-                                   const Instruction *I = nullptr);
+                                   const Instruction *I = nullptr) const;
 
   InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                            VectorType *VecTy, unsigned Index);
 
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
-                                 const Instruction *I = nullptr);
+                                 const Instruction *I = nullptr) const;
 
   InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                      TTI::TargetCostKind CostKind,
-                                     unsigned Index, Value *Op0, Value *Op1);
+                                     unsigned Index, Value *Op0,
+                                     Value *Op1) const;
 
   /// \param ScalarUserAndIdx encodes the information about extracts from a
   /// vector with 'Scalar' being the value being extracted,'User' being the user
@@ -213,20 +215,21 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
 
   InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                          FastMathFlags FMF,
-                                         TTI::TargetCostKind CostKind);
+                                         TTI::TargetCostKind CostKind) const;
 
-  InstructionCost getArithmeticReductionCostSVE(unsigned Opcode,
-                                                VectorType *ValTy,
-                                                TTI::TargetCostKind CostKind);
+  InstructionCost
+  getArithmeticReductionCostSVE(unsigned Opcode, VectorType *ValTy,
+                                TTI::TargetCostKind CostKind) const;
 
   InstructionCost getSpliceCost(VectorType *Tp, int Index,
-                                TTI::TargetCostKind CostKind);
+                                TTI::TargetCostKind CostKind) const;
 
   InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
+      ArrayRef<const Value *> Args = {},
+      const Instruction *CxtI = nullptr) const;
 
   InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                             const SCEV *Ptr);
@@ -236,17 +239,17 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
       TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      const Instruction *I = nullptr);
+      const Instruction *I = nullptr) const;
 
   TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                     bool IsZeroCmp) const;
   bool useNeonVector(const Type *Ty) const;
 
-  InstructionCost
-  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
-                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
-                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
-                  const Instruction *I = nullptr);
+  InstructionCost getMemoryOpCost(
+      unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+      TTI::TargetCostKind CostKind,
+      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
+      const Instruction *I = nullptr) const;
 
   InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
 
@@ -423,9 +426,10 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return ST->hasSVE();
   }
 
-  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
-                                             std::optional<FastMathFlags> FMF,
-                                             TTI::TargetCostKind CostKind);
+  InstructionCost
+  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
+                             std::optional<FastMathFlags> FMF,
+                             TTI::TargetCostKind CostKind) const;
 
   InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                            Type *ResTy, VectorType *ValTy,
@@ -441,13 +445,13 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
                                  TTI::TargetCostKind CostKind, int Index,
                                  VectorType *SubTp,
                                  ArrayRef<const Value *> Args = {},
-                                 const Instruction *CxtI = nullptr);
+                                 const Instruction *CxtI = nullptr) const;
 
   InstructionCost getScalarizationOverhead(VectorType *Ty,
                                            const APInt &DemandedElts,
                                            bool Insert, bool Extract,
                                            TTI::TargetCostKind CostKind,
-                                           ArrayRef<Value *> VL = {});
+                                           ArrayRef<Value *> VL = {}) const;
 
   /// Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 09f7877b13b3a..223e1a2084730 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -523,8 +523,7 @@ bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
 InstructionCost GCNTTIImpl::getArithmeticInstrCost(
     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
-    ArrayRef<const Value *> Args,
-    const Instruction *CxtI) {
+    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
   // Legalize the type.
   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
@@ -702,7 +701,7 @@ static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
 InstructionCost
 GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                  TTI::TargetCostKind CostKind) {
+                                  TTI::TargetCostKind CostKind) const {
   if (ICA.getID() == Intrinsic::fabs)
     return 0;
 
@@ -772,7 +771,7 @@ GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
 
 InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                            TTI::TargetCostKind CostKind,
-                                           const Instruction *I) {
+                                           const Instruction *I) const {
   assert((I == nullptr || I->getOpcode() == Opcode) &&
          "Opcode should reflect passed instruction.");
   const bool SCost =
@@ -803,7 +802,7 @@ InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
 InstructionCost
 GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                        std::optional<FastMathFlags> FMF,
-                                       TTI::TargetCostKind CostKind) {
+                                       TTI::TargetCostKind CostKind) const {
   if (TTI::requiresOrderedReduction(FMF))
     return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
 
@@ -821,7 +820,7 @@ GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
 InstructionCost
 GCNTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                    FastMathFlags FMF,
-                                   TTI::TargetCostKind CostKind) {
+                                   TTI::TargetCostKind CostKind) const {
   EVT OrigTy = TLI->getValueType(DL, Ty);
 
   // Computes cost on targets that have packed math instructions(which support
@@ -1125,7 +1124,7 @@ InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                            TTI::TargetCostKind CostKind,
                                            int Index, VectorType *SubTp,
                                            ArrayRef<const Value *> Args,
-                                           const Instruction *CxtI) {
+                                           const Instruction *CxtI) const {
   if (!isa<FixedVectorType>(VT))
     return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index f5062070ac6f4..32fa40a52d16b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -157,10 +157,11 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
+      ArrayRef<const Value *> Args = {},
+      const Instruction *CxtI = nullptr) const;
 
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
-                                 const Instruction *I = nullptr);
+                                 const Instruction *I = nullptr) const;
 
   bool isInlineAsmSourceOfDivergence(const CallInst *CI,
                                      ArrayRef<unsigned> Indices = {}) const;
@@ -238,14 +239,14 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
                  std::function<void(Instruction *, unsigned, APInt, APInt &)>
                      SimplifyAndSetOp) const;
 
-  InstructionCost getVectorSplitCost() { return 0; }
+  InstructionCost getVectorSplitCost() const { return 0; }
 
   InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                  ArrayRef<int> Mask,
                                  TTI::TargetCostKind CostKind, int Index,
                                  VectorType *SubTp,
                                  ArrayRef<const Value *> Args = {},
-                                 const Instruction *CxtI = nullptr);
+                                 const Instruction *CxtI = nullptr) const;
 
   bool isProfitableToSinkOperands(Instruction *I,
                                   SmallVectorImpl<Use *> &Ops) const;
@@ -260,15 +261,16 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
   int getInlinerVectorBonusPercent() const {
     return InlinerVectorBonusPercent;
   }
 
-  InstructionCost getArithmeticReductionCost(
-      unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
-      TTI::TargetCostKind CostKind);
+  InstructionCost
+  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
+                             std::optional<FastMathFlags> FMF,
+                             TTI::TargetCostKind CostKind) const;
 
   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                        TTI::TargetCostKind CostKind);
+                                        TTI::TargetCostKind CostKind) const;
   InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                          FastMathFlags FMF,
-                                         TTI::TargetCostKind CostKind);
+                                         TTI::TargetCostKind CostKind) const;
 
   /// Data cache line size for LoopDataPrefetch pass. Has no use before GFX12.
   unsigned getCacheLineSize() const override { return 128; }
diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
index ad4aaa8fdef84..430c39bf0bade 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
@@ -93,7 +93,7 @@ unsigned R600TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
 
 InstructionCost R600TTIImpl::getCFInstrCost(unsigned Opcode,
                                             TTI::TargetCostKind CostKind,
-                                            const Instruction *I) {
+                                            const Instruction *I) const {
   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
     return Opcode == Instruction::PHI ? 0 : 1;
 
diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
index 2934b0151f4df..80ecb3986907a 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
@@ -59,7 +59,7 @@ class R600TTIImpl final : public BasicTTIImplBase<R600TTIImpl> {
                                   unsigned AddrSpace) const;
   unsigned getMaxInterleaveFactor(ElementCount VF);
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
-                                 const Instruction *I = nullptr);
+                                 const Instruction *I = nullptr) const;
   using BaseT::getVectorInstrCost;
   InstructionCost getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      TTI::TargetCostKind CostKind,
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 2f9c262511ae4..0127d3885d7ee 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -482,7 +482,7 @@ InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
 
 InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
                                            TTI::TargetCostKind CostKind,
-                                           const Instruction *I) {
+                                           const Instruction *I) const {
   if (CostKind == TTI::TCK_RecipThroughput &&
       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
     // FIXME: The vectorizer is highly sensistive to the cost of these
@@ -498,7 +498,7 @@ InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
 InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                              Type *Src,
                                              TTI::CastContextHint CCH,
                                              TTI::TargetCostKind CostKind,
-                                             const Instruction *I) {
+                                             const Instruction *I) const {
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Invalid opcode");
 
@@ -940,7 +940,7 @@ InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
 
 InstructionCost ARMTTIImpl::getCmpSelInstrCost(
     unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
     TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
-    TTI::OperandValueInfo Op2Info, const Instruction *I) {
+    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
 
   // Thumb scalar code size cost for select.
@@ -1123,7 +1123,7 @@ bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
 }
 
 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment,
-                                   unsigned /*AddressSpace*/) {
+                                   unsigned /*AddressSpace*/) const {
   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
     return false;
@@ -1221,7 +1221,7 @@ int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
   return -1;
 }
 
-InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
+InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) const {
   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
 
   // To model the cost of a library call, we assume 1 for the call, and
@@ -1236,7 +1236,7 @@ InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                            TTI::TargetCostKind CostKind,
                                            int Index, VectorType *SubTp,
                                            ArrayRef<const Value *> Args,
-                                           const Instruction *CxtI) {
+                                           const Instruction *CxtI) const {
   Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
   // Treat extractsubvector as single op permutation.
   bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
@@ -1349,8 +1349,7 @@ InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
 InstructionCost ARMTTIImpl::getArithmeticInstrCost(
     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
-    ArrayRef<const Value *> Args,
-    const Instruction *CxtI) {
+    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
     // Make operations on i1 relatively expensive as this often involves
@@ -1549,7 +1548,7 @@ InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                             unsigned AddressSpace,
                                             TTI::TargetCostKind CostKind,
                                             TTI::OperandValueInfo OpInfo,
-                                            const Instruction *I) {
+                                            const Instruction *I) const {
   // TODO: Handle other cost kinds.
   if (CostKind != TTI::TCK_RecipThroughput)
     return 1;
@@ -1594,7 +1593,7 @@ InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
 InstructionCost
 ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                   unsigned AddressSpace,
-                                  TTI::TargetCostKind CostKind) {
+                                  TTI::TargetCostKind CostKind) const {
   if (ST->hasMVEIntegerOps()) {
     if (Opcode == Instruction::Load &&
         isLegalMaskedLoad(Src, Alignment, AddressSpace))
@@ -1654,7 +1653,7 @@ InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
 
 InstructionCost ARMTTIImpl::getGatherScatterOpCost(
     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
-    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
+    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
   using namespace PatternMatch;
   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
@@ -1759,7 +1758,7 @@ InstructionCost ARMTTIImpl::getGatherScatterOpCost(
 InstructionCost
 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                        std::optional<FastMathFlags> FMF,
-                                       TTI::TargetCostKind CostKind) {
+                                       TTI::TargetCostKind CostKind) const {
   EVT ValVT = TLI->getValueType(DL, ValTy);
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
@@ -1906,7 +1905,7 @@ ARMTTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
 InstructionCost
 ARMTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                    FastMathFlags FMF,
-                                   TTI::TargetCostKind CostKind) {
+                                   TTI::TargetCostKind CostKind) const {
   EVT ValVT = TLI->getValueType(DL, Ty);
 
   // In general floating point reductions are a series of elementwise
@@ -1965,7 +1964,7 @@ ARMTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
 
 InstructionCost
 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                  TTI::TargetCostKind CostKind) {
+                                  TTI::TargetCostKind CostKind) const {
   unsigned Opc = ICA.getID();
   switch (Opc) {
   case Intrinsic::get_active_lane_mask:
@@ -2109,7 +2108,7 @@ ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
 }
 
-bool ARMTTIImpl::isLoweredToCall(const Function *F) {
+bool ARMTTIImpl::isLoweredToCall(const Function *F) const {
   if (!F->isIntrinsic())
     return BaseT::isLoweredToCall(F);
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 2b144f1628038..23b8f5220dd15 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -184,10 +184,11 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
 
   bool isProfitableLSRChainElement(Instruction *I);
 
-  bool isLegalMaskedLoad(Type *DataTy, Align Alignment, unsigned AddressSpace);
+  bool isLegalMaskedLoad(Type *DataTy, Align Alignment,
+                         unsigned AddressSpace) const;
 
   bool isLegalMaskedStore(Type *DataTy, Align Alignment,
-                          unsigned AddressSpace) {
+                          unsigned AddressSpace) const {
     return isLegalMaskedLoad(DataTy, Alignment, AddressSpace);
   }
 
@@ -209,7 +210,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
     return isLegalMaskedGather(Ty, Alignment);
   }
 
-  InstructionCost getMemcpyCost(const Instruction *I);
+  InstructionCost getMemcpyCost(const Instruction *I) const;
 
   uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
     return ST->getMaxInlineSizeThreshold();
@@ -222,7 +223,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
                                  TTI::TargetCostKind CostKind, int Index,
                                  VectorType *SubTp,
                                  ArrayRef<const Value *> Args = {},
-                                 const Instruction *CxtI = nullptr);
+                                 const Instruction *CxtI = nullptr) const;
 
   bool preferInLoopReduction(RecurKind Kind, Type *Ty) const;
 
@@ -231,19 +232,19 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }
 
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
-                                 const Instruction *I = nullptr);
+                                 const Instruction *I = nullptr) const;
 
   InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                    TTI::CastContextHint CCH,
                                    TTI::TargetCostKind CostKind,
-                                   const Instruction *I = nullptr);
+                                   const Instruction *I = nullptr) const;
 
   InstructionCost getCmpSelInstrCost(
       unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
       TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      const Instruction *I = nullptr);
+      const Instruction *I = nullptr) const;
 
   using BaseT::getVectorInstrCost;
   InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
@@ -257,17 +258,18 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
+      ArrayRef<const Value *> Args = {},
+      const Instruction *CxtI = nullptr) const;
 
-  InstructionCost
-  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
-                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
-                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
-                  const Instruction *I = nullptr);
+  InstructionCost getMemoryOpCost(
+      unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+      TTI::TargetCostKind CostKind,
+      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
+      const Instruction *I = nullptr) const;
 
   InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                         Align Alignment, unsigned AddressSpace,
-                                        TTI::TargetCostKind CostKind);
+                                        TTI::TargetCostKind CostKind) const;
 
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
@@ -278,11 +280,12 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
                                          const Value *Ptr, bool VariableMask,
                                          Align Alignment,
                                          TTI::TargetCostKind CostKind,
-                                         const Instruction *I = nullptr);
+                                         const Instruction *I = nullptr) const;
 
-  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
-                                             std::optional<FastMathFlags> FMF,
-                                             TTI::TargetCostKind CostKind);
+  InstructionCost
+  getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
+                             std::optional<FastMathFlags> FMF,
+                             TTI::TargetCostKind CostKind) const;
 
   InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                            Type *ResTy, VectorType *ValTy,
                                            std::optional<FastMathFlags> FMF,
@@ -293,10 +296,10 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
 
   InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                          FastMathFlags FMF,
-                                         TTI::TargetCostKind CostKind);
+                                         TTI::TargetCostKind CostKind) const;
 
   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                        TTI::TargetCostKind CostKind);
+                                        TTI::TargetCostKind CostKind) const;
 
   /// getScalingFactorCost - Return the cost of the scaling used in
   /// addressing mode represented by AM.
@@ -307,7 +310,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
                                        int64_t Scale, unsigned AddrSpace) const;
 
   bool maybeLoweredToCall(Instruction &I);
-  bool isLoweredToCall(const Function *F);
+  bool isLoweredToCall(const Function *F) const;
   bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
diff --git a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
index bf0bef3a2b2f9..47d11bf6805c9 100644
--- a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
+++ b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
@@ -49,7 +49,7 @@ class BPFTTIImpl : public BasicTTIImplBase<BPFTTIImpl> {
       TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      const llvm::Instruction *I = nullptr) {
+      const llvm::Instruction *I = nullptr) const {
     if (Opcode == Instruction::Select)
       return SCEVCheapExpansionBudget.getValue();
 
@@ -61,7 +61,8 @@ class BPFTTIImpl : public BasicTTIImplBase<BPFTTIImpl> {
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
       TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
-      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr) {
+      ArrayRef<const Value *> Args = {},
+      const Instruction *CxtI = nullptr) const {
     int ISD = TLI->InstructionOpcodeToISD(Opcode);
 
     switch (ISD) {
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index c3c77b514882b..e3e9a8612be1e 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -138,15 +138,15 @@ ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
   return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
 }
 
-InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
-                                                 ArrayRef<Type *> Tys,
-                                                 TTI::TargetCostKind CostKind) {
+InstructionCost
+HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
+                                 TTI::TargetCostKind CostKind) const {
   return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
 }
 
 InstructionCost
 HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                      TTI::TargetCostKind CostKind) {
+                                      TTI::TargetCostKind CostKind) const {
   if (ICA.getID() == Intrinsic::bswap) {
     std::pair<InstructionCost, MVT> LT =
         getTypeLegalizationCost(ICA.getReturnType());
@@ -166,7 +166,7 @@ InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                 unsigned AddressSpace,
                                                 TTI::TargetCostKind CostKind,
                                                 TTI::OperandValueInfo OpInfo,
-                                                const Instruction *I) {
+                                                const Instruction *I) const {
   assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
   // TODO: Handle other cost kinds.
   if (CostKind != TTI::TCK_RecipThroughput)
@@ -221,7 +221,7 @@ InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
 InstructionCost
 HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                       Align Alignment, unsigned AddressSpace,
-                                      TTI::TargetCostKind CostKind) {
+                                      TTI::TargetCostKind CostKind) const {
   return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                       CostKind);
 }
@@ -231,13 +231,13 @@ InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                                TTI::TargetCostKind CostKind,
                                                int Index, Type *SubTp,
                                                ArrayRef<const Value *> Args,
-                                               const Instruction *CxtI) {
+                                               const Instruction *CxtI) const {
   return 1;
 }
 
 InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
-    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
+    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
   return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                        Alignment, CostKind, I);
 }
@@ -258,7 +258,7 @@ InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
 InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
     unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
     TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
-    TTI::OperandValueInfo Op2Info, const Instruction *I) {
+    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
   if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
     if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
       return InstructionCost::getMax();
@@ -273,8 +273,7 @@ InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
 InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
-    ArrayRef<const Value *> Args,
-    const Instruction *CxtI) {
+    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
   // TODO: Handle more cost kinds.
if (CostKind != TTI::TCK_RecipThroughput) return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, @@ -295,7 +294,7 @@ InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy, Type *SrcTy, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { auto isNonHVXFP = [this] (Type *Ty) { return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy(); }; diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h index b23369ac054b9..e69019d159ace 100644 --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h @@ -105,29 +105,29 @@ class HexagonTTIImpl : public BasicTTIImplBase { InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef Tys, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getAddressComputationCost(Type *Tp, ScalarEvolution *SE, const SCEV *S); - InstructionCost - getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, - unsigned AddressSpace, TTI::TargetCostKind CostKind, - TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + InstructionCost getMemoryOpCost( + unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, + TTI::TargetCostKind CostKind, + TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, + const Instruction *I = nullptr) const; InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, Type *SubTp, ArrayRef Args = {}, - const Instruction *CxtI = nullptr); + const Instruction *CxtI = nullptr) const; InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, - const Instruction *I); + const Instruction *I) const; InstructionCost getInterleavedMemoryOpCost( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, @@ -137,23 +137,24 @@ class HexagonTTIImpl : public BasicTTIImplBase { TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = {}, const Instruction *CxtI = nullptr); + ArrayRef Args = {}, + const Instruction *CxtI = nullptr) const; InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1); InstructionCost getCFInstrCost(unsigned 
Opcode, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) { + const Instruction *I = nullptr) const { return 1; } diff --git a/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h b/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h index 5fe63e4a2e031..14759e7ca8f4a 100644 --- a/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h +++ b/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h @@ -94,7 +94,8 @@ class LanaiTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = {}, const Instruction *CxtI = nullptr) { + ArrayRef Args = {}, + const Instruction *CxtI = nullptr) const { int ISD = TLI->InstructionOpcodeToISD(Opcode); switch (ISD) { diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp index 72245fe83491d..b5f7d90cd29d9 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp @@ -512,8 +512,7 @@ NVPTXTTIImpl::getInstructionCost(const User *U, InstructionCost NVPTXTTIImpl::getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, - ArrayRef Args, - const Instruction *CxtI) { + ArrayRef Args, const Instruction *CxtI) const { // Legalize the type. std::pair LT = getTypeLegalizationCost(Ty); diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h index 9e77f628da7a7..9fd5c17f58959 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h @@ -103,13 +103,14 @@ class NVPTXTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = {}, const Instruction *CxtI = nullptr); + ArrayRef Args = {}, + const Instruction *CxtI = nullptr) const; InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, - ArrayRef VL = {}) { + ArrayRef VL = {}) const { if (!InTy->getElementCount().isFixed()) return InstructionCost::getInvalid(); diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp index 3a4c2fcad8c83..074ad7a793d1a 100644 --- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp @@ -548,7 +548,8 @@ unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) { // thereby reducing the overall throughput of vector code wrt. scalar code. // An invalid instruction cost is returned if the type is an MMA vector type. InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode, - Type *Ty1, Type *Ty2) { + Type *Ty1, + Type *Ty2) const { // If the vector type is of an MMA type (v256i1, v512i1), an invalid // instruction cost is returned. 
This is to signify to other cost computing // functions to return the maximum instruction cost in order to prevent any @@ -581,8 +582,7 @@ InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode, InstructionCost PPCTTIImpl::getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, - ArrayRef Args, - const Instruction *CxtI) { + ArrayRef Args, const Instruction *CxtI) const { assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode"); InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty, nullptr); @@ -605,7 +605,7 @@ InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, TTI::TargetCostKind CostKind, int Index, Type *SubTp, ArrayRef Args, - const Instruction *CxtI) { + const Instruction *CxtI) const { InstructionCost CostFactor = vectorCostAdjustmentFactor(Instruction::ShuffleVector, Tp, nullptr); @@ -625,7 +625,7 @@ InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { if (CostKind != TTI::TCK_RecipThroughput) return Opcode == Instruction::PHI ? 0 : 1; // Branches are assumed to be predicted. @@ -636,7 +636,7 @@ InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode"); InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Dst, Src); @@ -655,7 +655,7 @@ InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, InstructionCost PPCTTIImpl::getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, - TTI::OperandValueInfo Op2Info, const Instruction *I) { + TTI::OperandValueInfo Op2Info, const Instruction *I) const { InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, ValTy, nullptr); if (!CostFactor.isValid()) @@ -764,7 +764,7 @@ InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, - const Instruction *I) { + const Instruction *I) const { InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Src, nullptr); if (!CostFactor.isValid()) @@ -890,7 +890,7 @@ InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost( InstructionCost PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { return BaseT::getIntrinsicInstrCost(ICA, CostKind); } diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h index bf3ddad134e14..3ddb322980a92 100644 --- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h +++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h @@ -101,44 +101,45 @@ class PPCTTIImpl : public BasicTTIImplBase { unsigned getPrefetchDistance() const override; unsigned getMaxInterleaveFactor(ElementCount VF); InstructionCost vectorCostAdjustmentFactor(unsigned Opcode, Type *Ty1, - Type *Ty2); + Type *Ty2) const; InstructionCost getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, 
TTI::OP_None}, - ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr); + ArrayRef<const Value *> Args = {}, + const Instruction *CxtI = nullptr) const; InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index, Type *SubTp, ArrayRef<const Value *> Args = {}, - const Instruction *CxtI = nullptr); + const Instruction *CxtI = nullptr) const; InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1); - InstructionCost - getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, - unsigned AddressSpace, TTI::TargetCostKind CostKind, - TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + InstructionCost getMemoryOpCost( + unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, + TTI::TargetCostKind CostKind, + TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, + const Instruction *I = nullptr) const; InstructionCost getInterleavedMemoryOpCost( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond = false, bool UseMaskForGaps = false); InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; bool areInlineCompatible(const Function *Caller, const Function *Callee) const; bool areTypesABICompatible(const Function *Caller, const Function *Callee, diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 6bd60c20c8626..41f315bff2f67 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -45,7 +45,7 @@ static cl::opt<unsigned> InstructionCost RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { // Check if the type is valid for all CostKind if (!VT.isVector()) return InstructionCost::getInvalid(); @@ -343,7 +343,8 @@ RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { } InstructionCost -RISCVTTIImpl::getConstantPoolLoadCost(Type *Ty, TTI::TargetCostKind CostKind) { +RISCVTTIImpl::getConstantPoolLoadCost(Type *Ty, + TTI::TargetCostKind CostKind) const { // Add a cost of address generation + the cost of the load. The address // is expected to be a PC relative offset to a constant pool entry // using auipc/addi. @@ -397,7 +398,7 @@ static VectorType *getVRGatherIndexType(MVT DataVT, const RISCVSubtarget &ST, /// TTI::TCC_Basic). If the source register is just reused, the cost for /// this operation is 0.
static InstructionCost -costShuffleViaVRegSplitting(RISCVTTIImpl &TTI, MVT LegalVT, +costShuffleViaVRegSplitting(const RISCVTTIImpl &TTI, MVT LegalVT, std::optional<unsigned> VLen, VectorType *Tp, ArrayRef<int> Mask, TTI::TargetCostKind CostKind) { assert(LegalVT.isFixedLengthVector()); @@ -466,7 +467,7 @@ costShuffleViaVRegSplitting(RISCVTTIImpl &TTI, MVT LegalVT, InstructionCost RISCVTTIImpl::getSlideCost(FixedVectorType *Tp, ArrayRef<int> Mask, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { // Avoid missing masks and length changing shuffles if (Mask.size() <= 2 || Mask.size() != Tp->getNumElements()) return InstructionCost::getInvalid(); @@ -527,7 +528,7 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef<const Value *> Args, - const Instruction *CxtI) { + const Instruction *CxtI) const { Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp); std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp); @@ -837,7 +838,7 @@ static unsigned isM1OrSmaller(MVT VT) { InstructionCost RISCVTTIImpl::getScalarizationOverhead( VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, - TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) { + TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const { if (isa<ScalableVectorType>(Ty)) return InstructionCost::getInvalid(); @@ -875,7 +876,7 @@ InstructionCost RISCVTTIImpl::getScalarizationOverhead( InstructionCost RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { if (!isLegalMaskedLoadStore(Src, Alignment) || CostKind != TTI::TCK_RecipThroughput) return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, @@ -983,7 +984,7 @@ InstructionCost RISCVTTIImpl::getInterleavedMemoryOpCost( InstructionCost RISCVTTIImpl::getGatherScatterOpCost( unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { + Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { if (CostKind != TTI::TCK_RecipThroughput) return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I); @@ -1008,7 +1009,7 @@ InstructionCost RISCVTTIImpl::getGatherScatterOpCost( InstructionCost RISCVTTIImpl::getExpandCompressMemoryOpCost( unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, - TTI::TargetCostKind CostKind, const Instruction *I) { + TTI::TargetCostKind CostKind, const Instruction *I) const { bool IsLegal = (Opcode == Instruction::Store && isLegalMaskedCompressStore(DataTy, Alignment)) || (Opcode == Instruction::Load && @@ -1046,7 +1047,7 @@ InstructionCost RISCVTTIImpl::getExpandCompressMemoryOpCost( InstructionCost RISCVTTIImpl::getStridedMemoryOpCost( unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { + Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { if (((Opcode == Instruction::Load || Opcode == Instruction::Store) && !isLegalStridedLoadStore(DataTy, Alignment)) || (Opcode != Instruction::Load && Opcode != Instruction::Store)) @@ -1167,7 +1168,7 @@ static unsigned getISDForVPIntrinsicID(Intrinsic::ID ID) { InstructionCost RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { auto *RetTy = ICA.getReturnType(); switch (ICA.getID()) { case
Intrinsic::lrint: @@ -1470,7 +1471,7 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { bool IsVectorType = isa<VectorType>(Dst) && isa<VectorType>(Src); if (!IsVectorType) return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); @@ -1661,7 +1662,7 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); } -unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) { +unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) const { if (isa<ScalableVectorType>(Ty)) { const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType()); const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue(); @@ -1674,7 +1675,7 @@ unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) { InstructionCost RISCVTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors()) return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind); @@ -1780,7 +1781,7 @@ RISCVTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, InstructionCost RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors()) return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind); @@ -1930,9 +1931,9 @@ InstructionCost RISCVTTIImpl::getExtendedReductionCost( getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); } -InstructionCost RISCVTTIImpl::getStoreImmCost(Type *Ty, - TTI::OperandValueInfo OpInfo, - TTI::TargetCostKind CostKind) { +InstructionCost +RISCVTTIImpl::getStoreImmCost(Type *Ty, TTI::OperandValueInfo OpInfo, + TTI::TargetCostKind CostKind) const { assert(OpInfo.isConstant() && "non constant operand?"); if (!isa<VectorType>(Ty)) // FIXME: We need to account for immediate materialization here, but doing @@ -1949,13 +1950,12 @@ InstructionCost RISCVTTIImpl::getStoreImmCost(Type *Ty, return getConstantPoolLoadCost(Ty, CostKind); } - InstructionCost RISCVTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, - const Instruction *I) { + const Instruction *I) const { EVT VT = TLI->getValueType(DL, Src, true); // Type legalization can't handle structs if (VT == MVT::Other) @@ -1993,13 +1993,12 @@ InstructionCost RISCVTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, if (LT.second.isVector() && CostKind != TTI::TCK_CodeSize) BaseCost *= TLI->getLMULCost(LT.second); return Cost + BaseCost; - } InstructionCost RISCVTTIImpl::getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, - TTI::OperandValueInfo Op2Info, const Instruction *I) { + TTI::OperandValueInfo Op2Info, const Instruction *I) const { if (CostKind != TTI::TCK_RecipThroughput) return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I); @@ -2158,7 +2157,7 @@ InstructionCost RISCVTTIImpl::getCmpSelInstrCost( InstructionCost RISCVTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { if (CostKind != TTI::TCK_RecipThroughput) return Opcode ==
Instruction::PHI ? 0 : 1; // Branches are assumed to be predicted. @@ -2315,7 +2314,7 @@ InstructionCost RISCVTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, InstructionCost RISCVTTIImpl::getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, - ArrayRef<const Value *> Args, const Instruction *CxtI) { + ArrayRef<const Value *> Args, const Instruction *CxtI) const { // TODO: Handle more cost kinds. if (CostKind != TTI::TCK_RecipThroughput) @@ -2635,7 +2634,8 @@ bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1, C2.ScaleCost, C2.ImmCost, C2.SetupCost); } -bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) { +bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, + Align Alignment) const { auto *VTy = dyn_cast<VectorType>(DataTy); if (!VTy || VTy->isScalableTy()) return false; @@ -2652,7 +2652,8 @@ bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) { return true; } -bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) { +bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, + Align Alignment) const { auto *VTy = dyn_cast<VectorType>(DataTy); if (!VTy || VTy->isScalableTy()) return false; diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index c61dd1507f168..ac7603089a61b 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -45,7 +45,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { /// This does unfortunately mean that we can both undershoot and overshoot /// the true cost significantly if getVScaleForTuning is wildly off for the /// actual target hardware. - unsigned getEstimatedVLFor(VectorType *Ty); + unsigned getEstimatedVLFor(VectorType *Ty) const; /// This function calculates the costs for one or more RVV opcodes based /// on the vtype and the cost kind. @@ -56,17 +56,17 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { /// refers to the result or source type. /// \param CostKind The type of cost to compute. InstructionCost getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; /// Return the cost of accessing a constant pool entry of the specified /// type. InstructionCost getConstantPoolLoadCost(Type *Ty, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; /// If this shuffle can be lowered as a masked slide pair (at worst), /// return a cost for it. InstructionCost getSlideCost(FixedVectorType *Tp, ArrayRef<int> Mask, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; public: explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F) @@ -76,7 +76,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { /// Return the cost of materializing an immediate for a value operand of /// a store instruction.
InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind); @@ -135,7 +135,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base, @@ -159,16 +159,16 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef<const Value *> Args = {}, - const Instruction *CxtI = nullptr); + const Instruction *CxtI = nullptr) const; InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, - ArrayRef<Value *> VL = {}); + ArrayRef<Value *> VL = {}) const; InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getInterleavedMemoryOpCost( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, @@ -179,55 +179,55 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, - const Instruction *I); + const Instruction *I) const; - InstructionCost getExpandCompressMemoryOpCost(unsigned Opcode, Type *Src, - bool VariableMask, - Align Alignment, - TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + InstructionCost + getExpandCompressMemoryOpCost(unsigned Opcode, Type *Src, bool VariableMask, + Align Alignment, TTI::TargetCostKind CostKind, + const Instruction *I = nullptr) const; InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, - const Instruction *I); + const Instruction *I) const; InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys); InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; - InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, - std::optional<FastMathFlags> FMF, - TTI::TargetCostKind CostKind); + InstructionCost + getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, + std::optional<FastMathFlags> FMF, + TTI::TargetCostKind CostKind) const; InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind); - InstructionCost - getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, - unsigned AddressSpace, TTI::TargetCostKind CostKind, - TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + InstructionCost getMemoryOpCost( + unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, + TTI::TargetCostKind CostKind, + TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None}, + const Instruction *I = nullptr) const; InstructionCost getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info =
{TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, @@ -238,13 +238,14 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr); + ArrayRef<const Value *> Args = {}, + const Instruction *CxtI = nullptr) const; bool isElementTypeLegalForScalableVector(Type *Ty) const { return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)); } - bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) { + bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const { if (!ST->hasVInstructions()) return false; @@ -270,7 +271,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { return isLegalMaskedLoadStore(DataType, Alignment); } - bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) { + bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const { if (!ST->hasVInstructions()) return false; @@ -293,10 +294,10 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { return TLI->isLegalElementTypeForRVV(ElemType); } - bool isLegalMaskedGather(Type *DataType, Align Alignment) { + bool isLegalMaskedGather(Type *DataType, Align Alignment) const { return isLegalMaskedGatherScatter(DataType, Alignment); } - bool isLegalMaskedScatter(Type *DataType, Align Alignment) { + bool isLegalMaskedScatter(Type *DataType, Align Alignment) const { return isLegalMaskedGatherScatter(DataType, Alignment); } @@ -310,7 +311,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { return ST->is64Bit() && !ST->hasVInstructionsI64(); } - bool isLegalStridedLoadStore(Type *DataType, Align Alignment) { + bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const { EVT DataTypeVT = TLI->getValueType(DL, DataType); return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment); } @@ -321,9 +322,9 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { DL); } - bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment); + bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const; - bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment); + bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment) const; bool isVScaleKnownToBeAPowerOfTwo() const { return TLI->isVScaleKnownToBeAPowerOfTwo(); diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp index e0b0099466c52..37c353d5bff09 100644 --- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp @@ -493,7 +493,7 @@ static bool isFreeEltLoad(Value *Op) { InstructionCost SystemZTTIImpl::getScalarizationOverhead( VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, - TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) { + TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const { unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements(); InstructionCost Cost = 0; @@ -541,8 +541,7 @@ static unsigned getNumVectorRegs(Type *Ty) { InstructionCost SystemZTTIImpl::getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, - ArrayRef<const Value *> Args, - const Instruction *CxtI) { + ArrayRef<const Value *> Args, const Instruction *CxtI) const { // TODO: Handle more cost kinds. if (CostKind != TTI::TCK_RecipThroughput) @@ -727,7 +726,7 @@ InstructionCost SystemZTTIImpl::getArithmeticInstrCost( InstructionCost SystemZTTIImpl::getShuffleCost( TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef<const Value *> Args, const Instruction *CxtI) { + ArrayRef<const Value *> Args, const Instruction *CxtI) const { Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp); if (ST->hasVector()) { unsigned NumVectors = getNumVectorRegs(Tp); @@ -776,8 +775,7 @@ static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) { } // Return the number of instructions needed to truncate SrcTy to DstTy. -unsigned SystemZTTIImpl:: -getVectorTruncCost(Type *SrcTy, Type *DstTy) { +unsigned SystemZTTIImpl::getVectorTruncCost(Type *SrcTy, Type *DstTy) const { assert (SrcTy->isVectorTy() && DstTy->isVectorTy()); assert(SrcTy->getPrimitiveSizeInBits().getFixedValue() > DstTy->getPrimitiveSizeInBits().getFixedValue() && @@ -818,8 +816,8 @@ getVectorTruncCost(Type *SrcTy, Type *DstTy) { // Return the cost of converting a vector bitmask produced by a compare // (SrcTy), to the type of the select or extend instruction (DstTy). -unsigned SystemZTTIImpl:: -getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) { +unsigned SystemZTTIImpl::getVectorBitmaskConversionCost(Type *SrcTy, + Type *DstTy) const { assert (SrcTy->isVectorTy() && DstTy->isVectorTy() && "Should only be called with vector types."); @@ -869,9 +867,9 @@ static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) { // Get the cost of converting a boolean vector to a vector with same width // and element size as Dst, plus the cost of zero extending if needed. -unsigned SystemZTTIImpl:: -getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst, - const Instruction *I) { +unsigned +SystemZTTIImpl::getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst, + const Instruction *I) const { auto *DstVTy = cast<FixedVectorType>(Dst); unsigned VF = DstVTy->getNumElements(); unsigned Cost = 0; @@ -890,7 +888,7 @@ InstructionCost SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { // FIXME: Can the logic below also be used for these cost kinds? if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) { auto BaseCost = BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); @@ -1087,7 +1085,7 @@ static unsigned getOperandsExtensionCost(const Instruction *I) { InstructionCost SystemZTTIImpl::getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, - TTI::OperandValueInfo Op2Info, const Instruction *I) { + TTI::OperandValueInfo Op2Info, const Instruction *I) const { if (CostKind != TTI::TCK_RecipThroughput) return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info); @@ -1209,8 +1207,8 @@ InstructionCost SystemZTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, } // Check if a load may be folded as a memory operand in its user.
-bool SystemZTTIImpl:: -isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) { +bool SystemZTTIImpl::isFoldableLoad(const LoadInst *Ld, + const Instruction *&FoldedValue) const { if (!Ld->hasOneUse()) return false; FoldedValue = Ld; @@ -1302,7 +1300,7 @@ InstructionCost SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, - const Instruction *I) { + const Instruction *I) const { assert(!Src->isVoidTy() && "Invalid type"); // TODO: Handle other cost kinds. @@ -1457,7 +1455,7 @@ inline bool customCostReductions(unsigned Opcode) { InstructionCost SystemZTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { unsigned ScalarBits = Ty->getScalarSizeInBits(); // The following is only for subtargets with vector math, non-ordered // reductions, and reasonable scalar sizes for int and fp add/mul. @@ -1484,7 +1482,7 @@ SystemZTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, InstructionCost SystemZTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { // Return custom costs only on subtargets with vector enhancements. if (ST->hasVectorEnhancements1()) { unsigned NumVectors = getNumVectorRegs(Ty); @@ -1513,7 +1511,7 @@ getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, InstructionCost SystemZTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { InstructionCost Cost = getVectorIntrinsicInstrCost( ICA.getID(), ICA.getReturnType(), ICA.getArgTypes()); if (Cost != -1) diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h index e64b1f1ccbd93..10b7d5e8f7263 100644 --- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h +++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h @@ -28,7 +28,9 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> { unsigned const LIBCALL_COST = 30; - bool isInt128InVR(Type *Ty) { return Ty->isIntegerTy(128) && ST->hasVector(); } + bool isInt128InVR(Type *Ty) const { + return Ty->isIntegerTy(128) && ST->hasVector(); + } public: explicit SystemZTTIImpl(const SystemZTargetMachine *TM, const Function &F) @@ -89,7 +91,7 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> { const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, - ArrayRef<Value *> VL = {}); + ArrayRef<Value *> VL = {}) const; bool supportsEfficientVectorElementLoadStore() { return true; } bool enableInterleavedAccessVectorization() { return true; } @@ -97,52 +99,55 @@ unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr); + ArrayRef<const Value *> Args = {}, + const Instruction *CxtI = nullptr) const; InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef<const Value *> Args = {}, - const Instruction *CxtI = nullptr); - unsigned getVectorTruncCost(Type *SrcTy, Type *DstTy); - unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy); + const Instruction *CxtI = nullptr) const; + unsigned
getVectorTruncCost(Type *SrcTy, Type *DstTy) const; + unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) const; unsigned getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst, - const Instruction *I); + const Instruction *I) const; InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1); - bool isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue); - InstructionCost - getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, - unsigned AddressSpace, TTI::TargetCostKind CostKind, - TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + bool isFoldableLoad(const LoadInst *Ld, + const Instruction *&FoldedValue) const; + InstructionCost getMemoryOpCost( + unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, + TTI::TargetCostKind CostKind, + TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, + const Instruction *I = nullptr) const; InstructionCost getInterleavedMemoryOpCost( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond = false, bool UseMaskForGaps = false); - InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, - std::optional<FastMathFlags> FMF, - TTI::TargetCostKind CostKind); + InstructionCost + getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, + std::optional<FastMathFlags> FMF, + TTI::TargetCostKind CostKind) const; InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; bool shouldExpandReduction(const IntrinsicInst *II) const; /// @} diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp index e4dc38686a445..4cede67badd8d 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp @@ -53,7 +53,7 @@ TypeSize WebAssemblyTTIImpl::getRegisterBitWidth( InstructionCost WebAssemblyTTIImpl::getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, - ArrayRef<const Value *> Args, const Instruction *CxtI) { + ArrayRef<const Value *> Args, const Instruction *CxtI) const { InstructionCost Cost = BasicTTIImplBase<WebAssemblyTTIImpl>::getArithmeticInstrCost( @@ -81,7 +81,7 @@ InstructionCost WebAssemblyTTIImpl::getArithmeticInstrCost( InstructionCost WebAssemblyTTIImpl::getCastInstrCost( unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, - TTI::TargetCostKind CostKind, const Instruction *I) { + TTI::TargetCostKind CostKind, const Instruction *I) const { int
ISD = TLI->InstructionOpcodeToISD(Opcode); auto SrcTy = TLI->getValueType(DL, Src); auto DstTy = TLI->getValueType(DL, Dst); @@ -144,7 +144,7 @@ InstructionCost WebAssemblyTTIImpl::getCastInstrCost( InstructionCost WebAssemblyTTIImpl::getMemoryOpCost( unsigned Opcode, Type *Ty, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, - const Instruction *I) { + const Instruction *I) const { if (!ST->hasSIMD128() || !isa<FixedVectorType>(Ty)) { return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace, CostKind); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h index ba66306374c6c..c4291f7d023a8 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h @@ -65,17 +65,18 @@ class WebAssemblyTTIImpl final : public BasicTTIImplBase<WebAssemblyTTIImpl> { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr); + ArrayRef<const Value *> Args = {}, + const Instruction *CxtI = nullptr) const; InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getMemoryOpCost( unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp index 41e437a47ba29..025a08aa75180 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -246,8 +246,7 @@ unsigned X86TTIImpl::getMaxInterleaveFactor(ElementCount VF) { InstructionCost X86TTIImpl::getArithmeticInstrCost( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, - ArrayRef<const Value *> Args, - const Instruction *CxtI) { + ArrayRef<const Value *> Args, const Instruction *CxtI) const { // vXi8 multiplications are always promoted to vXi16. // Sub-128-bit types can be extended/packed more efficiently. @@ -1526,7 +1525,7 @@ X86TTIImpl::getAltInstrCost(VectorType *VecTy, unsigned Opcode0, InstructionCost X86TTIImpl::getShuffleCost( TTI::ShuffleKind Kind, VectorType *BaseTp, ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef<const Value *> Args, const Instruction *CxtI) { + ArrayRef<const Value *> Args, const Instruction *CxtI) const { // 64-bit packed float vectors (v2f32) are widened to type v4f32. // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(BaseTp); @@ -2271,7 +2270,7 @@ InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); @@ -3296,7 +3295,7 @@ InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, InstructionCost X86TTIImpl::getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, - TTI::OperandValueInfo Op2Info, const Instruction *I) { + TTI::OperandValueInfo Op2Info, const Instruction *I) const { // Early out if this type isn't scalar/vector integer/float. if (!(ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, @@ -3596,7 +3595,7 @@ unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } InstructionCost X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { // Costs should match the codegen from: // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll @@ -4917,7 +4916,7 @@ InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, InstructionCost X86TTIImpl::getScalarizationOverhead( VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, - TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) { + TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const { assert(DemandedElts.getBitWidth() == cast<FixedVectorType>(Ty)->getNumElements() && "Vector size mismatch"); @@ -5081,7 +5080,7 @@ InstructionCost X86TTIImpl::getScalarizationOverhead( InstructionCost X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy); // We don't differentiate element types here, only element bit width. EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits); @@ -5189,7 +5188,7 @@ InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, - const Instruction *I) { + const Instruction *I) const { // TODO: Handle other cost kinds.
if (CostKind != TTI::TCK_RecipThroughput) { if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { @@ -5357,7 +5356,7 @@ InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, InstructionCost X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment, unsigned AddressSpace, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { bool IsLoad = (Instruction::Load == Opcode); bool IsStore = (Instruction::Store == Opcode); @@ -5472,7 +5471,7 @@ InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty, InstructionCost X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, std::optional<FastMathFlags> FMF, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { if (TTI::requiresOrderedReduction(FMF)) return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); @@ -5721,7 +5720,7 @@ X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, InstructionCost X86TTIImpl::getMinMaxCost(Intrinsic::ID IID, Type *Ty, TTI::TargetCostKind CostKind, - FastMathFlags FMF) { + FastMathFlags FMF) const { IntrinsicCostAttributes ICA(IID, Ty, {Ty, Ty}, FMF); return getIntrinsicInstrCost(ICA, CostKind); } @@ -5729,7 +5728,7 @@ InstructionCost X86TTIImpl::getMinMaxCost(Intrinsic::ID IID, Type *Ty, InstructionCost X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy, FastMathFlags FMF, - TTI::TargetCostKind CostKind) { + TTI::TargetCostKind CostKind) const { std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy); MVT MTy = LT.second; @@ -6089,7 +6088,7 @@ InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, - const Instruction *I) { + const Instruction *I) const { if (CostKind != TTI::TCK_RecipThroughput) return Opcode == Instruction::PHI ? TTI::TCC_Free : TTI::TCC_Basic; // Branches are assumed to be predicted. @@ -6121,7 +6120,7 @@ InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, TTI::TargetCostKind CostKind, Type *SrcVTy, const Value *Ptr, Align Alignment, - unsigned AddressSpace) { + unsigned AddressSpace) const { assert(isa<FixedVectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); @@ -6192,7 +6191,7 @@ InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, InstructionCost X86TTIImpl::getGatherScatterOpCost( unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) { + const Instruction *I = nullptr) const { if ((Opcode == Instruction::Load && (!isLegalMaskedGather(SrcVTy, Align(Alignment)) || forceScalarizeMaskedGather(cast<VectorType>(SrcVTy), @@ -6255,7 +6254,7 @@ static bool isLegalMaskedLoadStore(Type *ScalarTy, const X86Subtarget *ST) { } bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment, - unsigned AddressSpace) { + unsigned AddressSpace) const { Type *ScalarTy = DataTy->getScalarType(); // The backend can't handle a single element vector w/o CFCMOV. @@ -6268,7 +6267,7 @@ bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment, } bool X86TTIImpl::isLegalMaskedStore(Type *DataTy, Align Alignment, - unsigned AddressSpace) { + unsigned AddressSpace) const { Type *ScalarTy = DataTy->getScalarType(); // The backend can't handle a single element vector w/o CFCMOV.
@@ -6358,7 +6357,8 @@ bool X86TTIImpl::supportsGather() const { return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()); } -bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) { +bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, + Align Alignment) const { // Gather / Scatter for vector 2 is not profitable on KNL / SKX // Vector-4 of gather/scatter instruction does not exist on KNL. We can extend // it to 8 elements, but zeroing upper bits of the mask vector will add more @@ -6370,7 +6370,8 @@ bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) { (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX()))); } -bool X86TTIImpl::isLegalMaskedGatherScatter(Type *DataTy, Align Alignment) { +bool X86TTIImpl::isLegalMaskedGatherScatter(Type *DataTy, + Align Alignment) const { Type *ScalarTy = DataTy->getScalarType(); if (ScalarTy->isPointerTy()) return true; @@ -6385,7 +6386,7 @@ bool X86TTIImpl::isLegalMaskedGatherScatter(Type *DataTy, Align Alignment) { return IntWidth == 32 || IntWidth == 64; } -bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { +bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) const { if (!supportsGather() || !ST->preferGather()) return false; return isLegalMaskedGatherScatter(DataTy, Alignment); @@ -6424,7 +6425,7 @@ bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, return false; } -bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) { +bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) const { // AVX2 doesn't support scatter if (!ST->hasAVX512() || !ST->preferScatter()) return false; diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h index 5b6204d665206..707ca2862cded 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.h +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h @@ -140,7 +140,8 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr); + ArrayRef<const Value *> Args = {}, + const Instruction *CxtI = nullptr) const; InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, @@ -151,17 +152,17 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef<const Value *> Args = {}, - const Instruction *CxtI = nullptr); + const Instruction *CxtI = nullptr) const; InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getCmpSelInstrCost( unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, @@ -170,24 +171,24 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, - ArrayRef<Value *> VL = {}); + ArrayRef<Value *> VL = {})
const; InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, - TTI::TargetCostKind CostKind); - InstructionCost - getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, - unsigned AddressSpace, TTI::TargetCostKind CostKind, - TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, - const Instruction *I = nullptr); + TTI::TargetCostKind CostKind) const; + InstructionCost getMemoryOpCost( + unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, + TTI::TargetCostKind CostKind, + TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None}, + const Instruction *I = nullptr) const; InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, - const Instruction *I); + const Instruction *I) const; InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, @@ -211,19 +212,20 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { unsigned getAtomicMemIntrinsicMaxElementSize() const; InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; - InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, - std::optional<FastMathFlags> FMF, - TTI::TargetCostKind CostKind); + InstructionCost + getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, + std::optional<FastMathFlags> FMF, + TTI::TargetCostKind CostKind) const; InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty, TTI::TargetCostKind CostKind, - FastMathFlags FMF); + FastMathFlags FMF) const; InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, - TTI::TargetCostKind CostKind); + TTI::TargetCostKind CostKind) const; InstructionCost getInterleavedMemoryOpCost( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, @@ -241,7 +243,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { TTI::TargetCostKind CostKind); InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + const Instruction *I = nullptr) const; InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, @@ -263,19 +265,19 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { const TargetTransformInfo::LSRCost &C2); bool canMacroFuseCmp(); bool isLegalMaskedLoad(Type *DataType, Align Alignment, - unsigned AddressSpace); + unsigned AddressSpace) const; bool isLegalMaskedStore(Type *DataType, Align Alignment, - unsigned AddressSpace); + unsigned AddressSpace) const; bool isLegalNTLoad(Type *DataType, Align Alignment); bool isLegalNTStore(Type *DataType, Align Alignment); bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const; - bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment); - bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) { + bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) const; + bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) const { return forceScalarizeMaskedGather(VTy, Alignment); } - bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment); - bool isLegalMaskedGather(Type *DataType, Align Alignment); - bool isLegalMaskedScatter(Type *DataType, Align
Alignment); + bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const; + bool isLegalMaskedGather(Type *DataType, Align Alignment) const; + bool isLegalMaskedScatter(Type *DataType, Align Alignment) const; bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment); bool isLegalMaskedCompressStore(Type *DataType, Align Alignment); bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, @@ -313,7 +315,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> { bool supportsGather() const; InstructionCost getGSVectorCost(unsigned Opcode, TTI::TargetCostKind CostKind, Type *DataTy, const Value *Ptr, - Align Alignment, unsigned AddressSpace); + Align Alignment, unsigned AddressSpace) const; int getGatherOverhead() const; int getScatterOverhead() const;
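
The mechanical pattern the patch applies across all these targets is easiest to see in isolation. Below is a minimal, self-contained sketch of the CRTP const-propagation idiom; it is not LLVM code, and the type and function names (TTIBase, MyTargetTTI, the single-opcode cost hooks) are invented for illustration. The point it demonstrates is the same one the diff makes: once the base class's thisT() is const-qualified and returns a pointer-to-const, every cost query dispatched through it must be const, which in turn forces the const qualifier onto each derived target's overrides.

#include <cstdio>

// Illustrative stand-in for the cost type; not the LLVM class.
struct InstructionCost { int Value = 0; };

// CRTP base mirroring the shape of BasicTTIImplBase: cost queries are
// const and reach the derived target through a const-qualified thisT().
template <typename T> class TTIBase {
protected:
  // Returns a pointer-to-const, so const methods of the base can only
  // call const methods on the derived target.
  const T *thisT() const { return static_cast<const T *>(this); }

public:
  InstructionCost getArithmeticInstrCost(unsigned Opcode) const {
    return {1}; // generic default cost
  }
  InstructionCost getCastInstrCost(unsigned Opcode) const {
    // Dispatch through thisT() so a derived override is used even when
    // the call originates in the base.
    return thisT()->getArithmeticInstrCost(Opcode);
  }
};

// A hypothetical target: it refines one hook and inherits the rest. The
// override must itself be const, exactly as the patch adds const to each
// target's cost methods.
class MyTargetTTI : public TTIBase<MyTargetTTI> {
public:
  InstructionCost getArithmeticInstrCost(unsigned Opcode) const {
    return {Opcode == 0 ? 2 : 1}; // pretend opcode 0 is more expensive
  }
};

int main() {
  const MyTargetTTI TTI; // a const object can now drive all cost queries
  std::printf("cast cost: %d\n", TTI.getCastInstrCost(0).Value);
  return 0;
}

Had any one hook in the chain remained non-const, the const thisT() dispatch would fail to compile, which is why the diff has to touch every target's cost interface at once rather than one method at a time.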