@@ -2701,6 +2701,14 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
       if (!MemR || !isa<VPWidenLoadRecipe>(MemR) || !MemR->isReverse())
         continue;
 
+      auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
+      VPValue *Ptr = VecEndPtr->getPtr();
+      Value *PtrUV = Ptr->getUnderlyingValue();
+      // Memory cost model requires the pointer operand of memory access
+      // instruction.
+      if (!PtrUV)
+        continue;
+
       Instruction &Ingredient = MemR->getIngredient();
       Type *ElementTy = getLoadStoreType(&Ingredient);
@@ -2711,10 +2719,9 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
           return false;
         const InstructionCost CurrentCost = MemR->computeCost(VF, Ctx);
         const InstructionCost StridedLoadStoreCost =
-            Ctx.TTI.getStridedMemoryOpCost(
-                Instruction::Load, DataTy,
-                getLoadStorePointerOperand(&Ingredient), MemR->isMasked(),
-                Alignment, Ctx.CostKind, &Ingredient);
+            Ctx.TTI.getStridedMemoryOpCost(Instruction::Load, DataTy, PtrUV,
+                                           MemR->isMasked(), Alignment,
+                                           Ctx.CostKind, &Ingredient);
         return StridedLoadStoreCost < CurrentCost;
       };
 
@@ -2724,10 +2731,7 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
 
       // The stride of consecutive reverse access must be -1.
       int64_t Stride = -1;
-      auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
-      VPValue *Ptr = VecEndPtr->getPtr();
-      auto *GEP = dyn_cast<GetElementPtrInst>(
-          Ptr->getUnderlyingValue()->stripPointerCasts());
+      auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());
       // Create a new vector pointer for strided access.
       auto *NewPtr = new VPVectorPointerRecipe(Ptr, ElementTy, /*Stride=*/true,
                                                GEP ? GEP->getNoWrapFlags()
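Taken together, the hunks hoist the pointer extraction to the top of the loop so the pass can skip any candidate whose VPValue has no underlying IR value before running cost queries, and then reuse the cached PtrUV in both the cost comparison and the GEP lookup. A minimal, self-contained sketch of that guard-then-compare shape, using hypothetical stand-in types rather than the actual VPlan API:

    // guard_then_compare.cpp -- illustrative only; Recipe and its fields are
    // hypothetical stand-ins, not LLVM's VPlan classes.
    #include <iostream>

    struct Recipe {
      const void *PtrUV;    // may be null, like VPValue::getUnderlyingValue()
      unsigned CurrentCost; // stands in for MemR->computeCost(VF, Ctx)
      unsigned StridedCost; // stands in for TTI.getStridedMemoryOpCost(...)
    };

    // Mirrors the diff: check the pointer first so the cost model is never
    // queried with a null pointer operand, then convert only when cheaper.
    bool shouldConvertToStrided(const Recipe &R) {
      if (!R.PtrUV)
        return false; // early bail-out, matching the new `continue`
      return R.StridedCost < R.CurrentCost;
    }

    int main() {
      int Dummy = 0;
      Recipe NoUnderlyingValue{nullptr, 10, 5};
      Recipe StridedIsCheaper{&Dummy, 10, 5};
      std::cout << shouldConvertToStrided(NoUnderlyingValue) << '\n'; // 0
      std::cout << shouldConvertToStrided(StridedIsCheaper) << '\n';  // 1
    }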