[LV] Convert gather loads with invariant stride into strided loads #147297

Draft · wants to merge 3 commits into base: main
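No PR description was captured with this page, so here is a minimal sketch (an assumed example, not taken from the PR or its tests) of the loop shape the change targets: the load's address advances by a loop-invariant stride, so consecutive vector lanes read addresses a fixed number of elements apart. Today such a load widens to a gather; with this patch it can instead become a single strided load (e.g. llvm.experimental.vp.strided.load).

// Assumed example: `stride` is invariant in the loop, so lane L of a
// vector iteration loads src[(i + L) * stride] -- consecutive lanes are a
// constant `stride` elements apart.
void gather_to_strided(int *dst, const int *src, long stride, long n) {
  for (long i = 0; i < n; ++i)
    dst[i] = src[i * stride]; // invariant-stride load: strided-load candidate
}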
28 changes: 18 additions & 10 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4183,7 +4183,7 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks(
[](const auto *R) { return Instruction::Select; })
.Case<VPWidenStoreRecipe>(
[](const auto *R) { return Instruction::Store; })
-         .Case<VPWidenLoadRecipe>(
+         .Case<VPWidenLoadRecipe, VPWidenStridedLoadRecipe>(
[](const auto *R) { return Instruction::Load; })
.Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
[](const auto *R) { return Instruction::Call; })
@@ -4282,6 +4282,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
case VPDef::VPWidenPointerInductionSC:
case VPDef::VPReductionPHISC:
case VPDef::VPInterleaveSC:
+  case VPDef::VPWidenStridedLoadSC:
case VPDef::VPWidenLoadEVLSC:
case VPDef::VPWidenLoadSC:
case VPDef::VPWidenStoreEVLSC:
@@ -7769,7 +7770,10 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
new VPVectorEndPointerRecipe(Ptr, &Plan.getVF(), getLoadStoreType(I),
/*Stride*/ -1, Flags, I->getDebugLoc());
} else {
-    VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
+    const DataLayout &DL = I->getDataLayout();
+    auto *StrideTy = DL.getIndexType(Ptr->getUnderlyingValue()->getType());
+    VPValue *StrideOne = Plan.getOrAddLiveIn(ConstantInt::get(StrideTy, 1));
+    VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I), StrideOne,
GEP ? GEP->getNoWrapFlags()
: GEPNoWrapFlags::none(),
I->getDebugLoc());
@@ -8908,20 +8912,15 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
// Adjust the recipes for any inloop reductions.
adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);

+  VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), CM,
+                        CM.CostKind);
// Transform recipes to abstract recipes if it is legal and beneficial and
// clamp the range for better cost estimation.
// TODO: Enable following transform when the EVL-version of extended-reduction
// and mulacc-reduction are implemented.
-  if (!CM.foldTailWithEVL()) {
-    VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), CM,
-                          CM.CostKind);
+  if (!CM.foldTailWithEVL())
VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
CostCtx, Range);
-  }

-  for (ElementCount VF : Range)
-    Plan->addVF(VF);
-  Plan->setName("Initial VPlan");

// Interleave memory: for each Interleave Group we marked earlier as relevant
// for this VPlan, replace the Recipes widening its memory instructions with a
@@ -8930,6 +8929,15 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
InterleaveGroups, RecipeBuilder,
CM.isScalarEpilogueAllowed());

+  // Convert memory recipes to strided access recipes if the strided access is
+  // legal and profitable.
+  VPlanTransforms::runPass(VPlanTransforms::convertToStridedAccesses, *Plan,
+                           CostCtx, Range);
+
+  for (ElementCount VF : Range)
+    Plan->addVF(VF);
+  Plan->setName("Initial VPlan");

// Replace VPValues for known constant strides guaranteed by predicate scalar
// evolution.
auto CanUseVersionedStride = [&Plan](VPUser &U, unsigned) {
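VPlanTransforms::convertToStridedAccesses itself is not shown in this excerpt. As a hypothetical sketch only (BasePtr and Stride are placeholders for values the transform would have to derive, e.g. from SCEV), the core rewrite it performs on a widened load LoadR that would otherwise become a gather might look roughly like:

// Hypothetical sketch, not code from this PR: swap a gather-producing
// VPWidenLoadRecipe for the strided-load recipe added below in VPlan.h.
auto *StridedLoad = new VPWidenStridedLoadRecipe(
    *cast<LoadInst>(&LoadR->getIngredient()), BasePtr, Stride, &Plan.getVF(),
    LoadR->getMask(), /*Metadata=*/*LoadR, LoadR->getDebugLoc());
StridedLoad->insertBefore(LoadR);
LoadR->replaceAllUsesWith(StridedLoad);
LoadR->eraseFromParent();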
95 changes: 73 additions & 22 deletions llvm/lib/Transforms/Vectorize/VPlan.h
@@ -557,6 +557,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
case VPRecipeBase::VPBranchOnMaskSC:
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPIRInstructionSC:
+    case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
@@ -1643,20 +1644,6 @@ struct VPWidenSelectRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {

/// A recipe for handling GEP instructions.
class VPWidenGEPRecipe : public VPRecipeWithIRFlags {
-  bool isPointerLoopInvariant() const {
-    return getOperand(0)->isDefinedOutsideLoopRegions();
-  }
-
-  bool isIndexLoopInvariant(unsigned I) const {
-    return getOperand(I + 1)->isDefinedOutsideLoopRegions();
-  }
-
-  bool areAllOperandsInvariant() const {
-    return all_of(operands(), [](VPValue *Op) {
-      return Op->isDefinedOutsideLoopRegions();
-    });
-  }
-
public:
VPWidenGEPRecipe(GetElementPtrInst *GEP, ArrayRef<VPValue *> Operands)
: VPRecipeWithIRFlags(VPDef::VPWidenGEPSC, Operands, *GEP) {
@@ -1675,6 +1662,20 @@ class VPWidenGEPRecipe : public VPRecipeWithIRFlags {

VP_CLASSOF_IMPL(VPDef::VPWidenGEPSC)

+  bool isPointerLoopInvariant() const {
+    return getOperand(0)->isDefinedOutsideLoopRegions();
+  }
+
+  bool isIndexLoopInvariant(unsigned I) const {
+    return getOperand(I + 1)->isDefinedOutsideLoopRegions();
+  }
+
+  bool areAllOperandsInvariant() const {
+    return all_of(operands(), [](VPValue *Op) {
+      return Op->isDefinedOutsideLoopRegions();
+    });
+  }
+
/// Generate the gep nodes.
void execute(VPTransformState &State) override;

@@ -1763,20 +1764,23 @@ class VPVectorEndPointerRecipe : public VPRecipeWithIRFlags,
#endif
};

-/// A recipe to compute the pointers for widened memory accesses of IndexTy.
+/// A recipe to compute the pointers for widened memory accesses of IndexedTy,
+/// with the Stride expressed in units of IndexedTy.
class VPVectorPointerRecipe : public VPRecipeWithIRFlags,
-                              public VPUnrollPartAccessor<1> {
+                              public VPUnrollPartAccessor<2> {
Type *IndexedTy;

public:
-  VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, GEPNoWrapFlags GEPFlags,
-                        DebugLoc DL)
-      : VPRecipeWithIRFlags(VPDef::VPVectorPointerSC, ArrayRef<VPValue *>(Ptr),
-                            GEPFlags, DL),
+  VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, VPValue *Stride,
+                        GEPNoWrapFlags GEPFlags, DebugLoc DL)
+      : VPRecipeWithIRFlags(VPDef::VPVectorPointerSC,
+                            ArrayRef<VPValue *>({Ptr, Stride}), GEPFlags, DL),
IndexedTy(IndexedTy) {}

VP_CLASSOF_IMPL(VPDef::VPVectorPointerSC)

+  VPValue *getStride() const { return getOperand(1); }

void execute(VPTransformState &State) override;

bool onlyFirstLaneUsed(const VPValue *Op) const override {
@@ -1794,7 +1798,7 @@ class VPVectorPointerRecipe : public VPRecipeWithIRFlags,
}

VPVectorPointerRecipe *clone() override {
-    return new VPVectorPointerRecipe(getOperand(0), IndexedTy,
+    return new VPVectorPointerRecipe(getOperand(0), IndexedTy, getStride(),
getGEPNoWrapFlags(), getDebugLoc());
}

@@ -2928,7 +2932,8 @@ class VPWidenMemoryRecipe : public VPRecipeBase, public VPIRMetadata {
return R->getVPDefID() == VPRecipeBase::VPWidenLoadSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStoreSC ||
R->getVPDefID() == VPRecipeBase::VPWidenLoadEVLSC ||
-           R->getVPDefID() == VPRecipeBase::VPWidenStoreEVLSC;
+           R->getVPDefID() == VPRecipeBase::VPWidenStoreEVLSC ||
+           R->getVPDefID() == VPRecipeBase::VPWidenStridedLoadSC;
}

static inline bool classof(const VPUser *U) {
@@ -3047,6 +3052,52 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
}
};

+/// A recipe for strided load operations, using the base address, stride, and
+/// an optional mask. This recipe generates a vp.strided.load intrinsic call
+/// to represent memory accesses with a fixed stride.
+struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
+                                        public VPValue {
+  VPWidenStridedLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Stride,
+                           VPValue *VF, VPValue *Mask,
+                           const VPIRMetadata &Metadata, DebugLoc DL)
+      : VPWidenMemoryRecipe(
+            VPDef::VPWidenStridedLoadSC, Load, {Addr, Stride, VF},
+            /*Consecutive=*/false, /*Reverse=*/false, Metadata, DL),
+        VPValue(this, &Load) {
+    setMask(Mask);
+  }
+
+  VPWidenStridedLoadRecipe *clone() override {
+    return new VPWidenStridedLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
+                                        getStride(), getVF(), getMask(), *this,
+                                        getDebugLoc());
+  }
+
+  VP_CLASSOF_IMPL(VPDef::VPWidenStridedLoadSC);
+
+  /// Return the stride operand.
+  VPValue *getStride() const { return getOperand(1); }
+
+  /// Return the VF operand.
+  VPValue *getVF() const { return getOperand(2); }
+
+  /// Generate a strided load.
+  void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe.
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+
+  /// Returns true if the recipe only uses the first lane of operand \p Op.
+  bool onlyFirstLaneUsed(const VPValue *Op) const override {
+    assert(is_contained(operands(), Op) &&
+           "Op must be an operand of the recipe");
+    return Op == getAddr() || Op == getStride() || Op == getVF();
+  }
+};

/// A recipe for widening store operations, using the stored value, the address
/// to store to and an optional mask.
struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
6 changes: 4 additions & 2 deletions llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -184,8 +184,10 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
}

Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
-  assert((isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(R)) &&
-         "Store recipes should not define any values");
+  assert(
+      (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
+          R)) &&
+      "Store recipes should not define any values");
return cast<LoadInst>(&R->getIngredient())->getType();
}

75 changes: 69 additions & 6 deletions llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -80,6 +80,7 @@ bool VPRecipeBase::mayWriteToMemory() const {
case VPWidenCastSC:
case VPWidenGEPSC:
case VPWidenIntOrFpInductionSC:
+  case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenPHISC:
@@ -103,6 +104,7 @@ bool VPRecipeBase::mayReadFromMemory() const {
return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
case VPInstructionSC:
return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
+  case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
return true;
@@ -184,6 +186,7 @@ bool VPRecipeBase::mayHaveSideEffects() const {
}
case VPInterleaveSC:
return mayWriteToMemory();
+  case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenStoreEVLSC:
@@ -2379,13 +2382,22 @@ void VPVectorEndPointerRecipe::print(raw_ostream &O, const Twine &Indent,
void VPVectorPointerRecipe::execute(VPTransformState &State) {
auto &Builder = State.Builder;
unsigned CurrentPart = getUnrollPart(*this);
-  Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false,
-                                /*IsUnitStride*/ true, CurrentPart, Builder);
+  Value *Stride = State.get(getStride(), /*IsScalar*/ true);
+
+  auto *StrideC = dyn_cast<ConstantInt>(Stride);
+  bool IsStrideOne = StrideC && StrideC->isOne();
+  bool IsUnitStride = IsStrideOne || (StrideC && StrideC->isMinusOne());
+  Type *IndexTy =
+      getGEPIndexTy(State.VF.isScalable(),
+                    /*IsReverse*/ false, IsUnitStride, CurrentPart, Builder);
Value *Ptr = State.get(getOperand(0), VPLane(0));

+  Stride = Builder.CreateSExtOrTrunc(Stride, IndexTy);
Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
+  Value *Index = IsStrideOne ? Increment : Builder.CreateMul(Increment, Stride);
+
Value *ResultPtr =
Builder.CreateGEP(IndexedTy, Ptr, Increment, "", getGEPNoWrapFlags());
Builder.CreateGEP(IndexedTy, Ptr, Index, "", getGEPNoWrapFlags());

State.set(this, ResultPtr, /*IsScalar*/ true);
}
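Restated for intuition (a sketch under the assumption that VF, the unroll part, and the element stride are plain integers; the real code above builds IR and handles scalable VFs via createStepForVF):

#include <cstdint>

// Sketch of the pointer math above: part `Part` starts VF * Part lanes past
// the base, and each lane advances by StrideElts elements, so the part's
// base pointer is &Base[VF * Part * StrideElts]. When StrideElts == 1 the
// multiply is skipped (the IsStrideOne fast path).
template <typename T>
T *partBasePointer(T *Base, uint64_t VF, uint64_t Part, int64_t StrideElts) {
  return Base + static_cast<int64_t>(VF * Part) * StrideElts;
}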
@@ -3063,9 +3075,11 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
->getAddressSpace();
-  unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
-                        ? Instruction::Load
-                        : Instruction::Store;
+  unsigned Opcode =
+      isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
+          this)
+          ? Instruction::Load
+          : Instruction::Store;

if (!Consecutive) {
// TODO: Using the original IR may not be accurate.
@@ -3074,6 +3088,11 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
assert(!Reverse &&
"Inconsecutive memory access should not have the order.");

+    if (isa<VPWidenStridedLoadRecipe>(this))
+      return Ctx.TTI.getStridedMemoryOpCost(
+          Opcode, Ty, Ptr, IsMasked, Alignment, Ctx.CostKind, &Ingredient);

return Ctx.TTI.getAddressComputationCost(Ty) +
Ctx.TTI.getGatherScatterOpCost(Opcode, Ty, Ptr, IsMasked, Alignment,
Ctx.CostKind, &Ingredient);
@@ -3224,6 +3243,50 @@ void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
}
#endif

+void VPWidenStridedLoadRecipe::execute(VPTransformState &State) {
+  Type *ScalarDataTy = getLoadStoreType(&Ingredient);
+  auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
+  const Align Alignment = getLoadStoreAlignment(&Ingredient);
+
+  auto &Builder = State.Builder;
+  Value *Addr = State.get(getAddr(), /*IsScalar*/ true);
+  Value *Stride = State.get(getStride(), /*IsScalar*/ true);
+  Value *Mask = nullptr;
+  if (VPValue *VPMask = getMask())
+    Mask = State.get(VPMask);
+  else
+    Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
+  Value *RunTimeVF = Builder.CreateZExtOrTrunc(State.get(getVF(), VPLane(0)),
+                                               Builder.getInt32Ty());
+
+  auto *PtrTy = Addr->getType();
+  auto *StrideTy = Stride->getType();
+  const DataLayout &DL = Ingredient.getDataLayout();
+  Value *StrideInBytes = Builder.CreateMul(
+      Stride, ConstantInt::get(StrideTy, DL.getTypeAllocSize(ScalarDataTy)));
+  CallInst *NewLI = Builder.CreateIntrinsic(
+      Intrinsic::experimental_vp_strided_load, {DataTy, PtrTy, StrideTy},
+      {Addr, StrideInBytes, Mask, RunTimeVF}, nullptr, "wide.strided.load");
+  NewLI->addParamAttr(
+      0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
+  applyMetadata(*NewLI);
+  State.set(this, NewLI);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void VPWidenStridedLoadRecipe::print(raw_ostream &O, const Twine &Indent,
+                                     VPSlotTracker &SlotTracker) const {
+  O << Indent << "WIDEN ";
+  printAsOperand(O, SlotTracker);
+  O << " = load ";
+  getAddr()->printAsOperand(O, SlotTracker);
+  O << ", stride = ";
+  getStride()->printAsOperand(O, SlotTracker);
+  O << ", runtimeVF = ";
+  getVF()->printAsOperand(O, SlotTracker);
+}
+#endif
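For intuition, the intrinsic call built in execute() above has roughly these per-lane semantics (a scalar reference sketch assuming i32 elements; in the real vp intrinsic, lanes that are masked off or at/above the explicit vector length are poison rather than preserved):

#include <cstdint>
#include <cstring>

// Scalar reference for the emitted llvm.experimental.vp.strided.load:
// active lane I (I < EVL, Mask[I] set) loads an i32 from
// Base + I * StrideBytes, where StrideBytes is the element stride already
// scaled by DL.getTypeAllocSize, as done in execute() above.
void stridedLoadRef(int32_t *Out, const char *Base, int64_t StrideBytes,
                    const bool *Mask, uint32_t EVL) {
  for (uint32_t I = 0; I < EVL; ++I)
    if (Mask[I])
      std::memcpy(&Out[I], Base + I * StrideBytes, sizeof(int32_t));
}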

void VPWidenStoreRecipe::execute(VPTransformState &State) {
VPValue *StoredVPValue = getStoredValue();
bool CreateScatter = !isConsecutive();