Skip to content

Commit 6d6268d

Browse files
committed
Revert "[SLP][X86] Improve reordering to consider alternate instruction bundles"
This reverts commit 6f88acf.
1 parent 6f88acf commit 6d6268d

File tree

7 files changed

+10
-106
lines changed

7 files changed

+10
-106
lines changed

llvm/include/llvm/Analysis/TargetTransformInfo.h

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
2222
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
2323

24-
#include "llvm/ADT/SmallBitVector.h"
2524
#include "llvm/IR/FMF.h"
2625
#include "llvm/IR/InstrTypes.h"
2726
#include "llvm/IR/PassManager.h"
@@ -679,16 +678,6 @@ class TargetTransformInfo {
679678
/// Return true if the target supports masked expand load.
680679
bool isLegalMaskedExpandLoad(Type *DataType) const;
681680

682-
/// Return true if this is an alternating opcode pattern that can be lowered
683-
/// to a single instruction on the target. In X86 this is for the addsub
684-
/// instruction which corresponds to a Shuffle + Fadd + FSub pattern in IR.
685-
/// This function expects two opcodes: \p Opcode1 and \p Opcode2 being
686-
/// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
687-
/// when \p Opcode0 is selected and `1` when Opcode1 is selected.
688-
/// \p VecTy is the vector type of the instruction to be generated.
689-
bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
690-
const SmallBitVector &OpcodeMask) const;
691-
692681
/// Return true if we should be enabling ordered reductions for the target.
693682
bool enableOrderedReductions() const;
694683

@@ -1592,9 +1581,6 @@ class TargetTransformInfo::Concept {
15921581
Align Alignment) = 0;
15931582
virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
15941583
virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
1595-
virtual bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
1596-
unsigned Opcode1,
1597-
const SmallBitVector &OpcodeMask) const = 0;
15981584
virtual bool enableOrderedReductions() = 0;
15991585
virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
16001586
virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
@@ -2020,10 +2006,6 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
20202006
bool isLegalMaskedExpandLoad(Type *DataType) override {
20212007
return Impl.isLegalMaskedExpandLoad(DataType);
20222008
}
2023-
bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
2024-
const SmallBitVector &OpcodeMask) const override {
2025-
return Impl.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
2026-
}
20272009
bool enableOrderedReductions() override {
20282010
return Impl.enableOrderedReductions();
20292011
}

llvm/include/llvm/Analysis/TargetTransformInfoImpl.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -279,11 +279,6 @@ class TargetTransformInfoImplBase {
279279

280280
bool isLegalMaskedCompressStore(Type *DataType) const { return false; }
281281

282-
bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
283-
const SmallBitVector &OpcodeMask) const {
284-
return false;
285-
}
286-
287282
bool isLegalMaskedExpandLoad(Type *DataType) const { return false; }
288283

289284
bool enableOrderedReductions() const { return false; }

llvm/lib/Analysis/TargetTransformInfo.cpp

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -412,12 +412,6 @@ bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
412412
return TTIImpl->isLegalMaskedGather(DataType, Alignment);
413413
}
414414

415-
bool TargetTransformInfo::isLegalAltInstr(
416-
VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
417-
const SmallBitVector &OpcodeMask) const {
418-
return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
419-
}
420-
421415
bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
422416
Align Alignment) const {
423417
return TTIImpl->isLegalMaskedScatter(DataType, Alignment);

llvm/lib/Target/X86/X86TargetTransformInfo.cpp

Lines changed: 0 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -5341,39 +5341,6 @@ bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
53415341
return IntWidth == 32 || IntWidth == 64;
53425342
}
53435343

5344-
bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
5345-
unsigned Opcode1,
5346-
const SmallBitVector &OpcodeMask) const {
5347-
// ADDSUBPS 4xf32 SSE3
5348-
// VADDSUBPS 4xf32 AVX
5349-
// VADDSUBPS 8xf32 AVX2
5350-
// ADDSUBPD 2xf64 SSE3
5351-
// VADDSUBPD 2xf64 AVX
5352-
// VADDSUBPD 4xf64 AVX2
5353-
5354-
unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
5355-
assert(OpcodeMask.size() == NumElements && "Mask and VecTy are incompatible");
5356-
if (!isPowerOf2_32(NumElements))
5357-
return false;
5358-
// Check the opcode pattern. We apply the mask on the opcode arguments and
5359-
// then check if it is what we expect.
5360-
for (int Lane : seq<int>(0, NumElements)) {
5361-
unsigned Opc = OpcodeMask.test(Lane) ? Opcode1 : Opcode0;
5362-
// We expect FSub for even lanes and FAdd for odd lanes.
5363-
if (Lane % 2 == 0 && Opc != Instruction::FSub)
5364-
return false;
5365-
if (Lane % 2 == 1 && Opc != Instruction::FAdd)
5366-
return false;
5367-
}
5368-
// Now check that the pattern is supported by the target ISA.
5369-
Type *ElemTy = cast<VectorType>(VecTy)->getElementType();
5370-
if (ElemTy->isFloatTy())
5371-
return ST->hasSSE3() && NumElements % 4 == 0;
5372-
if (ElemTy->isDoubleTy())
5373-
return ST->hasSSE3() && NumElements % 2 == 0;
5374-
return false;
5375-
}
5376-
53775344
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
53785345
// AVX2 doesn't support scatter
53795346
if (!ST->hasAVX512())

llvm/lib/Target/X86/X86TargetTransformInfo.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -241,8 +241,6 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
241241
bool isLegalMaskedScatter(Type *DataType, Align Alignment);
242242
bool isLegalMaskedExpandLoad(Type *DataType);
243243
bool isLegalMaskedCompressStore(Type *DataType);
244-
bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
245-
const SmallBitVector &OpcodeMask) const;
246244
bool hasDivRemOp(Type *DataType, bool IsSigned);
247245
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
248246
bool areInlineCompatible(const Function *Caller,

llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Lines changed: 2 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -3710,18 +3710,14 @@ void BoUpSLP::reorderTopToBottom() {
37103710
// their ordering.
37113711
DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
37123712

3713-
// AltShuffles can also have a preferred ordering that leads to fewer
3714-
// instructions, e.g., the addsub instruction in x86.
3715-
DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
3716-
37173713
// Maps a TreeEntry to the reorder indices of external users.
37183714
DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
37193715
ExternalUserReorderMap;
37203716
// Find all reorderable nodes with the given VF.
37213717
// Currently they are vectorized stores,loads,extracts + some gathering of
37223718
// extracts.
37233719
for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders,
3724-
&ExternalUserReorderMap, &AltShufflesToOrders](
3720+
&ExternalUserReorderMap](
37253721
const std::unique_ptr<TreeEntry> &TE) {
37263722
// Look for external users that will probably be vectorized.
37273723
SmallVector<OrdersType, 1> ExternalUserReorderIndices =
@@ -3732,27 +3728,6 @@ void BoUpSLP::reorderTopToBottom() {
37323728
std::move(ExternalUserReorderIndices));
37333729
}
37343730

3735-
// Patterns like [fadd,fsub] can be combined into a single instruction in
3736-
// x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
3737-
// to take into account their order when looking for the most used order.
3738-
if (TE->isAltShuffle()) {
3739-
VectorType *VecTy =
3740-
FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size());
3741-
unsigned Opcode0 = TE->getOpcode();
3742-
unsigned Opcode1 = TE->getAltOpcode();
3743-
// The opcode mask selects between the two opcodes.
3744-
SmallBitVector OpcodeMask(TE->Scalars.size(), 0);
3745-
for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size()))
3746-
if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1)
3747-
OpcodeMask.set(Lane);
3748-
// If this pattern is supported by the target then we consider the order.
3749-
if (TTI->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
3750-
VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3751-
AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
3752-
}
3753-
// TODO: Check the reverse order too.
3754-
}
3755-
37563731
if (Optional<OrdersType> CurrentOrder =
37573732
getReorderingData(*TE, /*TopToBottom=*/true)) {
37583733
// Do not include ordering for nodes used in the alt opcode vectorization,
@@ -3803,18 +3778,12 @@ void BoUpSLP::reorderTopToBottom() {
38033778
if (!OpTE->ReuseShuffleIndices.empty())
38043779
continue;
38053780
// Count number of orders uses.
3806-
const auto &Order = [OpTE, &GathersToOrders,
3807-
&AltShufflesToOrders]() -> const OrdersType & {
3781+
const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
38083782
if (OpTE->State == TreeEntry::NeedToGather) {
38093783
auto It = GathersToOrders.find(OpTE);
38103784
if (It != GathersToOrders.end())
38113785
return It->second;
38123786
}
3813-
if (OpTE->isAltShuffle()) {
3814-
auto It = AltShufflesToOrders.find(OpTE);
3815-
if (It != AltShufflesToOrders.end())
3816-
return It->second;
3817-
}
38183787
return OpTE->ReorderIndices;
38193788
}();
38203789
// First consider the order of the external scalar users.

llvm/test/Transforms/SLPVectorizer/X86/reorder_with_external_users.ll

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -118,21 +118,20 @@ define void @addsub_and_external_users(double *%A, double *%ptr) {
118118
; CHECK-NEXT: [[LD:%.*]] = load double, double* undef, align 8
119119
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[LD]], i32 0
120120
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[LD]], i32 1
121-
; CHECK-NEXT: [[TMP2:%.*]] = fsub <2 x double> [[TMP1]], <double 1.100000e+00, double 1.200000e+00>
122-
; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], <double 1.100000e+00, double 1.200000e+00>
123-
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP3]], <2 x i32> <i32 0, i32 3>
124-
; CHECK-NEXT: [[TMP5:%.*]] = fdiv <2 x double> [[TMP4]], <double 2.100000e+00, double 2.200000e+00>
125-
; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> [[TMP5]], <double 3.100000e+00, double 3.200000e+00>
121+
; CHECK-NEXT: [[TMP2:%.*]] = fsub <2 x double> [[TMP1]], <double 1.200000e+00, double 1.100000e+00>
122+
; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], <double 1.200000e+00, double 1.100000e+00>
123+
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP3]], <2 x i32> <i32 2, i32 1>
124+
; CHECK-NEXT: [[TMP5:%.*]] = fdiv <2 x double> [[TMP4]], <double 2.200000e+00, double 2.100000e+00>
125+
; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> [[TMP5]], <double 3.200000e+00, double 3.100000e+00>
126126
; CHECK-NEXT: [[PTRA0:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 0
127-
; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
128127
; CHECK-NEXT: [[TMP7:%.*]] = bitcast double* [[PTRA0]] to <2 x double>*
129-
; CHECK-NEXT: store <2 x double> [[SHUFFLE]], <2 x double>* [[TMP7]], align 8
128+
; CHECK-NEXT: store <2 x double> [[TMP6]], <2 x double>* [[TMP7]], align 8
130129
; CHECK-NEXT: br label [[BB2:%.*]]
131130
; CHECK: bb2:
132-
; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x double> [[TMP6]], <double 4.100000e+00, double 4.200000e+00>
131+
; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x double> [[TMP6]], <double 4.200000e+00, double 4.100000e+00>
133132
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[TMP8]], i32 0
134133
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x double> [[TMP8]], i32 1
135-
; CHECK-NEXT: [[SEED:%.*]] = fcmp ogt double [[TMP9]], [[TMP10]]
134+
; CHECK-NEXT: [[SEED:%.*]] = fcmp ogt double [[TMP10]], [[TMP9]]
136135
; CHECK-NEXT: ret void
137136
;
138137
bb1:

0 commit comments

Comments
 (0)