41 changes: 41 additions & 0 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5155,6 +5155,28 @@ static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN,
  return convertFromScalableVector(VT, Vec, DAG, Subtarget);
}

// Matches a subset of compress masks with a contiguous prefix of output
// elements. This could be extended to allow gaps by deciding which
// source elements to spuriously demand.
static bool isCompressMask(ArrayRef<int> Mask) {
  int Last = -1;
  bool SawUndef = false;
  for (unsigned i = 0; i < Mask.size(); i++) {
    if (Mask[i] == -1) {
      SawUndef = true;
      continue;
    }
    if (SawUndef)
      return false;
    if (i > (unsigned)Mask[i])
      return false;
    if (Mask[i] <= Last)
      return false;
    Last = Mask[i];
  }
  return true;
}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
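The predicate above is easiest to see on concrete masks. Here is a minimal standalone sketch of the same check (an illustration only, not part of the patch: std::vector<int> stands in for ArrayRef<int>, -1 marks an undef shuffle-mask element, and isCompressMaskSketch is a hypothetical name for this copy):

#include <cassert>
#include <vector>

// Same logic as isCompressMask above: the defined elements must form a
// contiguous prefix of the output, pulled from strictly increasing source
// indices that only ever move toward lower positions.
static bool isCompressMaskSketch(const std::vector<int> &Mask) {
  int Last = -1;
  bool SawUndef = false;
  for (unsigned i = 0; i < Mask.size(); i++) {
    if (Mask[i] == -1) {       // undef: start of the don't-care suffix
      SawUndef = true;
      continue;
    }
    if (SawUndef)              // defined element after an undef: prefix broken
      return false;
    if (i > (unsigned)Mask[i]) // a compress never moves an element rightward
      return false;
    if (Mask[i] <= Last)       // source indices must be strictly increasing
      return false;
    Last = Mask[i];
  }
  return true;
}

int main() {
  assert(isCompressMaskSketch({0, 2, 4, 5}));                 // pack-left subset
  assert(isCompressMaskSketch({0, 2, 4, 5, -1, -1, -1, -1})); // trailing undefs OK
  assert(!isCompressMaskSketch({1, 0, 2, 3}));   // reorders, so not a compress
  assert(!isCompressMaskSketch({0, -1, 3, -1})); // gap before a defined element
  return 0;
}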
@@ -5372,6 +5394,25 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
    if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
      return V;

    // Can we generate a vcompress instead of a vrgather? These scale better
    // at high LMUL, at the cost of not being able to fold a following select
    // into them. The mask constants are also smaller than the index vector
    // constants, and thus easier to materialize.
    if (isCompressMask(Mask)) {
      SmallVector<SDValue> MaskVals(NumElts,
                                    DAG.getConstant(false, DL, XLenVT));
      for (auto Idx : Mask) {
        if (Idx == -1)
          break;
        assert(Idx >= 0 && (unsigned)Idx < NumElts);
        MaskVals[Idx] = DAG.getConstant(true, DL, XLenVT);
      }
      MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
      SDValue CompressMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
      return DAG.getNode(ISD::VECTOR_COMPRESS, DL, VT, V1, CompressMask,
                         DAG.getUNDEF(VT));
    }

    if (VT.getScalarSizeInBits() == 8 &&
        any_of(Mask, [&](const auto &Idx) { return Idx > 255; })) {
      // On such a vector we're unable to use i8 as the index type.
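The test updates below show the second benefit claimed in the comment: the i1 compress mask packs into a small scalar immediate rather than an index-vector constant-pool load. A hedged sketch of that packing, one bit per demanded source element (buildCompressBits is an illustrative helper, not code from the patch):

#include <cassert>
#include <cstdint>
#include <vector>

// Each kept source element sets one bit of the vcompress mask, so the whole
// mask fits in a small integer that a single `li` can materialize.
static uint64_t buildCompressBits(const std::vector<int> &Mask) {
  uint64_t Bits = 0;
  for (int Idx : Mask) {
    if (Idx == -1)              // trailing undefs demand nothing
      break;
    Bits |= uint64_t(1) << Idx; // demand source element Idx
  }
  return Bits;
}

int main() {
  // Mask <0,2,4,5,7,undef,...> from shuffle_compress_singlesrc_e8/e16/e64
  // below: bits 0,2,4,5,7 -> 0b10110101 = 181, matching `li a0, 181`.
  assert(buildCompressBits({0, 2, 4, 5, 7, -1, -1, -1}) == 181);
  // Mask <0,1,4,5,6,undef,...> from shuffle_compress_singlesrc_e32:
  // bits 0,1,4,5,6 -> 0b01110011 = 115, matching `li a0, 115`.
  assert(buildCompressBits({0, 1, 4, 5, 6, -1, -1, -1}) == 115);
  return 0;
}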
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -40,16 +40,16 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
; CHECK-LABEL: hang_when_merging_stores_after_legalization:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vmv.v.i v12, -14
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: li a0, 7
; CHECK-NEXT: vmadd.vx v14, a0, v12
; CHECK-NEXT: li a0, 129
; CHECK-NEXT: vmv.s.x v15, a0
; CHECK-NEXT: vmv.v.i v0, 12
; CHECK-NEXT: vmul.vx v14, v12, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v14, -14
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v12, v10, v8, v0.t
; CHECK-NEXT: vcompress.vm v12, v8, v15
; CHECK-NEXT: vrgatherei16.vv v12, v10, v14, v0.t
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
  %z = shufflevector <8 x float> %x, <8 x float> %y, <4 x i32> <i32 0, i32 7, i32 8, i32 15>
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -138,17 +138,17 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
; CHECK-LABEL: vrgather_shuffle_vx_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
; CHECK-NEXT: li a0, 3
; CHECK-NEXT: vmul.vx v12, v10, a0
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v10, 9
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vcompress.vm v12, v8, v10
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.v.f v10, fa5
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vfmv.v.f v8, fa5
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
  %s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
  ret <4 x double> %s
63 changes: 29 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -113,14 +113,12 @@ define <4 x i16> @vrgather_shuffle_xv_v4i16(<4 x i16> %x) {
define <4 x i16> @vrgather_shuffle_vx_v4i16(<4 x i16> %x) {
; CHECK-LABEL: vrgather_shuffle_vx_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: li a0, 3
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 9
; CHECK-NEXT: vmv.v.i v0, 3
; CHECK-NEXT: vmul.vx v10, v9, a0
; CHECK-NEXT: vmv.v.i v9, 5
; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vcompress.vm v10, v8, v9
; CHECK-NEXT: vmv.v.i v8, 5
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
  %s = shufflevector <4 x i16> %x, <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
  ret <4 x i16> %s
@@ -723,21 +721,22 @@ define <8 x i32> @shuffle_v8i32_2(<8 x i32> %x, <8 x i32> %y) {
define <8 x i8> @shuffle_v64i8_v8i8(<64 x i8> %wide.vec) {
; CHECK-LABEL: shuffle_v64i8_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: lui a0, 4112
; CHECK-NEXT: li a1, 240
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: lui a1, 98561
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vsll.vi v14, v12, 3
; CHECK-NEXT: vrgather.vv v12, v8, v14
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: addi a1, a1, -2048
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: addi a0, a0, 257
; CHECK-NEXT: vmv.s.x v14, a0
; CHECK-NEXT: lui a0, 98561
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcompress.vm v12, v8, v14
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: addi a0, a0, -2048
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vrgather.vv v12, v8, v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
@@ -748,11 +747,10 @@ define <8 x i8> @shuffle_v64i8_v8i8(<64 x i8> %wide.vec) {
define <8 x i8> @shuffle_compress_singlesrc_e8(<8 x i8> %v) {
; CHECK-LABEL: shuffle_compress_singlesrc_e8:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI49_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI49_0)
; CHECK-NEXT: li a0, 181
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v10, (a0)
; CHECK-NEXT: vrgather.vv v9, v8, v10
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vcompress.vm v9, v8, v10
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %out = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 5, i32 7, i32 undef, i32 undef, i32 undef>
@@ -762,11 +760,10 @@ define <8 x i16> @shuffle_compress_singlesrc_e16(<8 x i16> %v) {
define <8 x i16> @shuffle_compress_singlesrc_e16(<8 x i16> %v) {
; CHECK-LABEL: shuffle_compress_singlesrc_e16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI50_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI50_0)
; CHECK-NEXT: li a0, 181
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v10, (a0)
; CHECK-NEXT: vrgather.vv v9, v8, v10
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vcompress.vm v9, v8, v10
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
  %out = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 5, i32 7, i32 undef, i32 undef, i32 undef>
@@ -776,11 +773,10 @@ define <8 x i32> @shuffle_compress_singlesrc_e32(<8 x i32> %v) {
define <8 x i32> @shuffle_compress_singlesrc_e32(<8 x i32> %v) {
; CHECK-LABEL: shuffle_compress_singlesrc_e32:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI51_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI51_0)
; CHECK-NEXT: li a0, 115
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle16.v v12, (a0)
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vcompress.vm v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 undef>
@@ -790,11 +786,10 @@ define <8 x i64> @shuffle_compress_singlesrc_e64(<8 x i64> %v) {
define <8 x i64> @shuffle_compress_singlesrc_e64(<8 x i64> %v) {
; CHECK-LABEL: shuffle_compress_singlesrc_e64:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI52_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI52_0)
; CHECK-NEXT: li a0, 181
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vcompress.vm v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %out = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 5, i32 7, i32 undef, i32 undef, i32 undef>