Merged · Changes from 2 commits
42 changes: 42 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1456,6 +1456,45 @@ InstCombinerImpl::foldShuffledIntrinsicOperands(IntrinsicInst *II) {
return new ShuffleVectorInst(NewIntrinsic, Mask);
}

/// If all arguments of the intrinsic are reverses, try to pull the reverse
/// after the intrinsic.
Value *InstCombinerImpl::foldReversedIntrinsicOperands(IntrinsicInst *II) {
  if (!isTriviallyVectorizable(II->getIntrinsicID()) ||
      !II->getCalledFunction()->isSpeculatable())
Collaborator:
Do we need to speculate it? The reverse isn't changing which lanes are active.

    return nullptr;

  // At least 1 operand must be a reverse with 1 use because we are creating 2
  // instructions.
  if (none_of(II->args(), [](Value *V) {
        return match(V, m_OneUse(m_VecReverse(m_Value())));
      }))
    return nullptr;

  Value *X;
  Constant *C;
  SmallVector<Value *> NewArgs;
  for (Use &Arg : II->args()) {
    if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
                                           Arg.getOperandNo(), nullptr))
      NewArgs.push_back(Arg);
    else if (match(&Arg, m_VecReverse(m_Value(X))))
      NewArgs.push_back(X);
    else if (Value *Splat = getSplatValue(Arg))
Collaborator:
Hm, since we're inserting a new splat, do we need the existing one to be one-use? You do need to insert a new one instead of reusing because of possible undef/poison lanes.

Contributor (PR author):

I looked at the existing binop combine to see what it was doing, and it actually doesn't create new splats; instead it checks isSplatValue, which doesn't allow undef lanes:

  if (match(LHS, m_VecReverse(m_Value(V1)))) {
    // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
    if (match(RHS, m_VecReverse(m_Value(V2))) &&
        (LHS->hasOneUse() || RHS->hasOneUse() ||
         (LHS == RHS && LHS->hasNUses(2))))
      return createBinOpReverse(V1, V2);

    // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
    if (LHS->hasOneUse() && isSplatValue(RHS))
      return createBinOpReverse(V1, RHS);
  }
  // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
  else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
    return createBinOpReverse(LHS, V2);

So I've done the same here in e0acdf3 to keep it consistent, disallowing poison lanes. I don't think supporting poison lanes is important given that reverse intrinsics are really only used for scalable vectors.
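
To make the poison-lane hazard concrete, here is a hypothetical fixed-width sketch (illustration only, not taken from the patch or its tests) of a value that is a splat in every lane but one:

  %ins   = insertelement <4 x i32> poison, i32 %x, i64 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
                         <4 x i32> <i32 0, i32 0, i32 0, i32 poison>   ; lane 3 is poison

Pulling the reverse through an intrinsic while reusing %splat unchanged would pair the other operand's lanes against different splat lanes, effectively moving the poison from lane 3 to lane 0 of the result. So either a fresh splat must be materialized, or, as in e0acdf3, isSplatValue (which rejects undef/poison lanes) must gate the combine.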

      NewArgs.push_back(Builder.CreateVectorSplat(
          cast<VectorType>(Arg->getType())->getElementCount(), Splat));
    else if (match(&Arg, m_ImmConstant(C)))
      NewArgs.push_back(Builder.CreateVectorReverse(C));
    else
      return nullptr;
  }

  // intrinsic (reverse X), (reverse Y), ... --> reverse (intrinsic X, Y, ...)
  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  Instruction *NewIntrinsic = Builder.CreateIntrinsic(
      II->getType(), II->getIntrinsicID(), NewArgs, FPI);
  return Builder.CreateVectorReverse(NewIntrinsic);
}
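
Note: in IR terms the new fold performs, e.g. (a sketch mirroring the binop_intrinsic_reverse test added below):

  %a.rev = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %res   = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> %b.rev)
    -->
  %smax = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  %res  = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %smax)

If the result feeds another reverse, the pair cancels entirely (see reverse_binop_intrinsic_reverse below).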

/// Fold the following cases and accepts bswap and bitreverse intrinsics:
/// bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
/// bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (ignores multiuse)
@@ -3867,6 +3906,9 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  if (Instruction *Shuf = foldShuffledIntrinsicOperands(II))
    return Shuf;

  if (Value *Reverse = foldReversedIntrinsicOperands(II))
    return replaceInstUsesWith(*II, Reverse);

// Some intrinsics (like experimental_gc_statepoint) can be used in invoke
// context, so it is handled in visitCallBase and we should trigger it.
return visitCallBase(*II);
1 change: 1 addition & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -148,6 +148,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *foldShuffledIntrinsicOperands(IntrinsicInst *II);
  Value *foldReversedIntrinsicOperands(IntrinsicInst *II);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);
133 changes: 133 additions & 0 deletions llvm/test/Transforms/InstCombine/vector-reverse.ll
@@ -17,6 +17,18 @@ define <vscale x 4 x i32> @binop_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @binop_intrinsic_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_intrinsic_reverse(
; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[A_REV:%.*]], <vscale x 4 x i32> [[B_REV:%.*]])
; CHECK-NEXT: [[SMAX:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SMAX]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
%b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
%smax = call <vscale x 4 x i32> @llvm.smax(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> %b.rev)
ret <vscale x 4 x i32> %smax
}

; %a.rev has multiple uses
define <vscale x 4 x i32> @binop_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_reverse_1(
@@ -33,6 +45,22 @@ define <vscale x 4 x i32> @binop_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
ret <vscale x 4 x i32> %add
}

; %a.rev has multiple uses
define <vscale x 4 x i32> @binop_intrinsic_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_intrinsic_reverse_1(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[B1:%.*]])
; CHECK-NEXT: [[SMAX:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SMAX]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
%b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
%smax = call <vscale x 4 x i32> @llvm.smax(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> %b.rev)
ret <vscale x 4 x i32> %smax
}

; %b.rev has multiple uses
define <vscale x 4 x i32> @binop_reverse_2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_reverse_2(
@@ -67,6 +95,24 @@ define <vscale x 4 x i32> @binop_reverse_3(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
ret <vscale x 4 x i32> %add
}

; %a.rev and %b.rev have multiple uses
define <vscale x 4 x i32> @binop_intrinsic_reverse_3(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_intrinsic_reverse_3(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SMAX:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SMAX]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
%b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
%smax = call <vscale x 4 x i32> @llvm.smax(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> %b.rev)
ret <vscale x 4 x i32> %smax
}

; %a.rev used as both operands
define <vscale x 4 x i32> @binop_reverse_4(<vscale x 4 x i32> %a) {
; CHECK-LABEL: @binop_reverse_4(
@@ -184,6 +230,17 @@ define <vscale x 4 x float> @unop_reverse_1(<vscale x 4 x float> %a) {
ret <vscale x 4 x float> %neg
}

define <vscale x 4 x float> @unop_intrinsic_reverse(<vscale x 4 x float> %a) {
; CHECK-LABEL: @unop_intrinsic_reverse(
; CHECK-NEXT: [[NEG:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]])
; CHECK-NEXT: [[ABS:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[NEG]])
; CHECK-NEXT: ret <vscale x 4 x float> [[ABS]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
%abs = call <vscale x 4 x float> @llvm.fabs(<vscale x 4 x float> %a.rev)
ret <vscale x 4 x float> %abs
}

define <vscale x 4 x i1> @icmp_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @icmp_reverse(
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
@@ -629,6 +686,18 @@ define <vscale x 4 x float> @reverse_binop_reverse(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
ret <vscale x 4 x float> %add.rev
}

define <vscale x 4 x float> @reverse_binop_intrinsic_reverse(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: @reverse_binop_intrinsic_reverse(
; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]], <vscale x 4 x float> [[B_REV:%.*]])
; CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
%b.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %b)
%maxnum = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a.rev, <vscale x 4 x float> %b.rev)
%maxnum.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %maxnum)
ret <vscale x 4 x float> %maxnum.rev
}

define <vscale x 4 x float> @reverse_binop_reverse_splat_RHS(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: @reverse_binop_reverse_splat_RHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
@@ -659,6 +728,49 @@ define <vscale x 4 x float> @reverse_binop_reverse_splat_LHS(<vscale x 4 x float> %a, float %b) {
ret <vscale x 4 x float> %div.rev
}

define <vscale x 4 x float> @reverse_binop_reverse_intrinsic_splat_RHS(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: @reverse_binop_reverse_intrinsic_splat_RHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[B_INSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[MAXNUM:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]], <vscale x 4 x float> [[B_SPLAT]])
; CHECK-NEXT: ret <vscale x 4 x float> [[MAXNUM]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
%b.insert = insertelement <vscale x 4 x float> poison, float %b, i32 0
%b.splat = shufflevector <vscale x 4 x float> %b.insert, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%maxnum = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a.rev, <vscale x 4 x float> %b.splat)
%maxnum.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %maxnum)
ret <vscale x 4 x float> %maxnum.rev
}

define <vscale x 4 x float> @reverse_binop_reverse_intrinsic_splat_LHS(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: @reverse_binop_reverse_intrinsic_splat_LHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[B_INSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[MAXNUM:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[B_SPLAT]], <vscale x 4 x float> [[A_REV:%.*]])
; CHECK-NEXT: ret <vscale x 4 x float> [[MAXNUM]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
%b.insert = insertelement <vscale x 4 x float> poison, float %b, i32 0
%b.splat = shufflevector <vscale x 4 x float> %b.insert, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%maxnum = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %b.splat, <vscale x 4 x float> %a.rev)
%maxnum.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %maxnum)
ret <vscale x 4 x float> %maxnum.rev
}

define <4 x float> @reverse_binop_reverse_intrinsic_constant_RHS(<4 x float> %a) {
; CHECK-LABEL: @reverse_binop_reverse_intrinsic_constant_RHS(
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[A:%.*]], <4 x float> <float 3.000000e+00, float 2.000000e+00, float 1.000000e+00, float 0.000000e+00>)
; CHECK-NEXT: [[MAXNUM:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[MAXNUM_REV:%.*]] = tail call <4 x float> @llvm.vector.reverse.v4f32(<4 x float> [[MAXNUM]])
; CHECK-NEXT: ret <4 x float> [[MAXNUM_REV]]
;
%a.rev = tail call <4 x float> @llvm.vector.reverse(<4 x float> %a)
%maxnum = call <4 x float> @llvm.maxnum.v4f32(<4 x float> <float 0.0, float 1.0, float 2.0, float 3.0>, <4 x float> %a.rev)
%maxnum.rev = tail call <4 x float> @llvm.vector.reverse(<4 x float> %maxnum)
ret <4 x float> %maxnum.rev
}

define <vscale x 4 x i1> @reverse_fcmp_reverse(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: @reverse_fcmp_reverse(
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
@@ -695,6 +807,27 @@ define <vscale x 4 x float> @reverse_unop_reverse(<vscale x 4 x float> %a) {
ret <vscale x 4 x float> %neg.rev
}

define <vscale x 4 x float> @reverse_unop_intrinsic_reverse(<vscale x 4 x float> %a) {
; CHECK-LABEL: @reverse_unop_intrinsic_reverse(
; CHECK-NEXT: [[ABS:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]])
; CHECK-NEXT: ret <vscale x 4 x float> [[ABS]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
%abs = call <vscale x 4 x float> @llvm.fabs(<vscale x 4 x float> %a.rev)
%abs.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %abs)
ret <vscale x 4 x float> %abs.rev
}

define <vscale x 4 x float> @reverse_unop_intrinsic_reverse_scalar_arg(<vscale x 4 x float> %a, i32 %power) {
; CHECK-LABEL: @reverse_unop_intrinsic_reverse_scalar_arg(
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.powi.nxv4f32.i32(<vscale x 4 x float> [[A:%.*]], i32 [[POWER:%.*]])
; CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
%powi = call <vscale x 4 x float> @llvm.powi.nxv4f32(<vscale x 4 x float> %a.rev, i32 %power)
%powi.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %powi)
ret <vscale x 4 x float> %powi.rev
}

declare void @use_nxv4i1(<vscale x 4 x i1>)
declare void @use_nxv4i32(<vscale x 4 x i32>)