Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 35 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16294,6 +16294,41 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// because targets may prefer a wider type during later combines and invert
// this transform.
switch (N0.getOpcode()) {
case ISD::AVGCEILU:
case ISD::AVGFLOORU:
  // Fold trunc (avgu X, Y) -> avgu (trunc X), (trunc Y) when the upper
  // bits of both operands are known zero, so the average of the narrowed
  // values equals the narrowed average.
  if (!LegalOperations && N0.hasOneUse() &&
      TLI.isOperationLegal(N0.getOpcode(), VT)) {
    SDValue X = N0.getOperand(0);
    SDValue Y = N0.getOperand(1);
    unsigned SrcBits = X.getScalarValueSizeInBits();
    unsigned DstBits = VT.getScalarSizeInBits();
    // For the unsigned forms the top (SrcBits - DstBits) bits of each
    // operand must be known zero; a significant-bits bound is not
    // sufficient here since it is derived from sign bits.
    unsigned NeededLeadingZeros = SrcBits - DstBits;
    if (DAG.computeKnownBits(X).countMinLeadingZeros() >=
            NeededLeadingZeros &&
        DAG.computeKnownBits(Y).countMinLeadingZeros() >=
            NeededLeadingZeros) {
      SDValue Tx = DAG.getNode(ISD::TRUNCATE, DL, VT, X);
      SDValue Ty = DAG.getNode(ISD::TRUNCATE, DL, VT, Y);
      return DAG.getNode(N0.getOpcode(), DL, VT, Tx, Ty);
    }
  }
  break;
case ISD::AVGCEILS:
case ISD::AVGFLOORS:
if (!LegalOperations && N0.hasOneUse() &&
TLI.isOperationLegal(N0.getOpcode(), VT)) {
SDValue X = N0.getOperand(0);
SDValue Y = N0.getOperand(1);
unsigned SignBitsX = DAG.ComputeNumSignBits(X);
unsigned SignBitsY = DAG.ComputeNumSignBits(Y);
unsigned SrcBits = X.getScalarValueSizeInBits();
unsigned DstBits = VT.getScalarSizeInBits();
unsigned NeededSignBits = SrcBits - DstBits + 1;
if (SignBitsX >= NeededSignBits && SignBitsY >= NeededSignBits) {
SDValue Tx = DAG.getNode(ISD::TRUNCATE, DL, VT, X);
SDValue Ty = DAG.getNode(ISD::TRUNCATE, DL, VT, Y);
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

 if (DAG.ComputeNumSignBits(X) >= NeededSignBits &&
     DAG.ComputeNumSignBits(Y) >= NeededSignBits) {

return DAG.getNode(N0.getOpcode(), DL, VT, Tx, Ty);
}
}
break;
case ISD::ADD:
case ISD::SUB:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You should be able to reuse the ISD::ABD code later in the switch statement now - its has near-identical logic

case ISD::MUL:
Expand Down
99 changes: 99 additions & 0 deletions llvm/test/CodeGen/AArch64/trunc-avg-fold.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-- -O2 -mattr=+neon < %s | FileCheck %s


define <8 x i8> @test_avgceil_u(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_avgceil_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    xtn v0.8b, v0.8h
; CHECK-NEXT:    xtn v1.8b, v1.8h
; CHECK-NEXT:    uhadd v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  ; Mask both inputs to 8 known-zero-extended bits so the combine can
  ; narrow the unsigned averaging to v8i8. Use the splat constant
  ; shorthand rather than the insertelement+shufflevector idiom.
  %ta16 = and <8 x i16> %a, splat (i16 255)
  %tb16 = and <8 x i16> %b, splat (i16 255)
  %ta8 = trunc <8 x i16> %ta16 to <8 x i8>
  %tb8 = trunc <8 x i16> %tb16 to <8 x i8>
  %res = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %ta8, <8 x i8> %tb8)
  ret <8 x i8> %res
}


define <8 x i8> @test_avgceil_s(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_avgceil_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqxtn v0.8b, v0.8h
; CHECK-NEXT:    sqxtn v1.8b, v1.8h
; CHECK-NEXT:    shadd v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  ; Clamp both inputs to the signed i8 range so each operand has enough
  ; sign bits for the combine to narrow the signed averaging to v8i8.
  ; Use splat constants rather than the insertelement+shufflevector idiom.
  %ta16 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %a, <8 x i16> splat (i16 127))
  %ta16.clamped = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %ta16, <8 x i16> splat (i16 -128))
  %tb16 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %b, <8 x i16> splat (i16 127))
  %tb16.clamped = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %tb16, <8 x i16> splat (i16 -128))
  %ta8 = trunc <8 x i16> %ta16.clamped to <8 x i8>
  %tb8 = trunc <8 x i16> %tb16.clamped to <8 x i8>
  %res = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %ta8, <8 x i8> %tb8)
  ret <8 x i8> %res
}


define <8 x i8> @test_avgfloor_u(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_avgfloor_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    xtn v0.8b, v0.8h
; CHECK-NEXT:    xtn v1.8b, v1.8h
; CHECK-NEXT:    urhadd v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  ; Mask both inputs to 8 known-zero-extended bits so the combine can
  ; narrow the unsigned rounding-averaging to v8i8. Use the splat
  ; constant shorthand rather than the insertelement+shufflevector idiom.
  %ta16 = and <8 x i16> %a, splat (i16 255)
  %tb16 = and <8 x i16> %b, splat (i16 255)
  %ta8 = trunc <8 x i16> %ta16 to <8 x i8>
  %tb8 = trunc <8 x i16> %tb16 to <8 x i8>
  %res = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %ta8, <8 x i8> %tb8)
  ret <8 x i8> %res
}


define <8 x i8> @test_avgfloor_s(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_avgfloor_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqxtn v0.8b, v0.8h
; CHECK-NEXT:    sqxtn v1.8b, v1.8h
; CHECK-NEXT:    srhadd v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  ; Clamp both inputs to the signed i8 range so each operand has enough
  ; sign bits for the combine to narrow the signed rounding-averaging to
  ; v8i8. Use splat constants rather than insertelement+shufflevector.
  %ta16 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %a, <8 x i16> splat (i16 127))
  %ta16.clamped = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %ta16, <8 x i16> splat (i16 -128))
  %tb16 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %b, <8 x i16> splat (i16 127))
  %tb16.clamped = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %tb16, <8 x i16> splat (i16 -128))
  %ta8 = trunc <8 x i16> %ta16.clamped to <8 x i8>
  %tb8 = trunc <8 x i16> %tb16.clamped to <8 x i8>
  %res = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %ta8, <8 x i8> %tb8)
  ret <8 x i8> %res
}

declare <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>)

declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)

Loading