
Commit 1dfd5c9

[X86][AVX] combineHorizOpWithShuffle - support target shuffles in HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y))

Be more aggressive on (AVX2+) folds of lane shuffles of 256-bit horizontal ops by working on target/faux shuffles as well.

1 parent 4017c6f, commit 1dfd5c9
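
The fold relies on the lane-wise behaviour of 256-bit horizontal ops: each 128-bit lane of the result reads only the matching 128-bit lane of the two sources, so a shuffle that merely rearranges whole 128-bit lanes of the inputs can be pulled through the op and replaced by a 64-bit element permute of HOP(X,Y). A minimal scalar sketch of that lane behaviour for VHADDPD (illustration only; the V4F64 alias and hadd_pd_256 helper are not from LLVM):

#include <array>

// One 256-bit <4 x double> value modelled as four scalars.
using V4F64 = std::array<double, 4>;

// Scalar model of 256-bit VHADDPD: result lane 0 (elements 0-1) depends only
// on lane 0 of both sources, result lane 1 (elements 2-3) only on lane 1.
V4F64 hadd_pd_256(const V4F64 &A, const V4F64 &B) {
  return {A[0] + A[1], B[0] + B[1], A[2] + A[3], B[2] + B[3]};
}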

File tree (3 files changed, +77 -50 lines):

  llvm/lib/Target/X86/X86ISelLowering.cpp
  llvm/test/CodeGen/X86/haddsub-2.ll
  llvm/test/CodeGen/X86/haddsub-undef.ll

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 26 additions & 24 deletions

@@ -43114,30 +43114,32 @@ static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
   // TODO: Relax shuffle scaling to support sub-128-bit subvector shuffles.
   if (VT.is256BitVector() && Subtarget.hasInt256()) {
-    if (auto *SVN0 = dyn_cast<ShuffleVectorSDNode>(N0)) {
-      if (auto *SVN1 = dyn_cast<ShuffleVectorSDNode>(N1)) {
-        SmallVector<int, 2> ShuffleMask0, ShuffleMask1;
-        if (scaleShuffleElements(SVN0->getMask(), 2, ShuffleMask0) &&
-            scaleShuffleElements(SVN1->getMask(), 2, ShuffleMask1)) {
-          SDValue Op00 = SVN0->getOperand(0);
-          SDValue Op01 = SVN0->getOperand(1);
-          SDValue Op10 = SVN1->getOperand(0);
-          SDValue Op11 = SVN1->getOperand(1);
-          if ((Op00 == Op11) && (Op01 == Op10)) {
-            std::swap(Op10, Op11);
-            ShuffleVectorSDNode::commuteMask(ShuffleMask1);
-          }
-          if ((Op00 == Op10) && (Op01 == Op11)) {
-            SmallVector<int, 4> ShuffleMask;
-            ShuffleMask.append(ShuffleMask0.begin(), ShuffleMask0.end());
-            ShuffleMask.append(ShuffleMask1.begin(), ShuffleMask1.end());
-            SDLoc DL(N);
-            MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
-            SDValue Res = DAG.getNode(Opcode, DL, VT, Op00, Op01);
-            Res = DAG.getBitcast(ShufVT, Res);
-            Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
-            return DAG.getBitcast(VT, Res);
-          }
+    SmallVector<int> Mask0, Mask1;
+    SmallVector<SDValue> Ops0, Ops1;
+    if (getTargetShuffleInputs(N0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
+        getTargetShuffleInputs(N1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
+        !Ops0.empty() && !Ops1.empty()) {
+      SDValue Op00 = Ops0.front(), Op01 = Ops0.back();
+      SDValue Op10 = Ops1.front(), Op11 = Ops1.back();
+      SmallVector<int, 2> ShuffleMask0, ShuffleMask1;
+      if (Op00.getValueType() == SrcVT && Op01.getValueType() == SrcVT &&
+          Op10.getValueType() == SrcVT && Op11.getValueType() == SrcVT &&
+          scaleShuffleElements(Mask0, 2, ShuffleMask0) &&
+          scaleShuffleElements(Mask1, 2, ShuffleMask1)) {
+        if ((Op00 == Op11) && (Op01 == Op10)) {
+          std::swap(Op10, Op11);
+          ShuffleVectorSDNode::commuteMask(ShuffleMask1);
+        }
+        if ((Op00 == Op10) && (Op01 == Op11)) {
+          SmallVector<int, 4> ShuffleMask;
+          ShuffleMask.append(ShuffleMask0.begin(), ShuffleMask0.end());
+          ShuffleMask.append(ShuffleMask1.begin(), ShuffleMask1.end());
+          SDLoc DL(N);
+          MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
+          SDValue Res = DAG.getNode(Opcode, DL, VT, Op00, Op01);
+          Res = DAG.getBitcast(ShufVT, Res);
+          Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
+          return DAG.getBitcast(VT, Res);
        }
      }
    }
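
As a rough standalone model of the bookkeeping above (a sketch under simplifying assumptions, not LLVM code: LaneMask, commuteLaneMask and combineHopMasks are made-up stand-ins for the SmallVector masks, ShuffleVectorSDNode::commuteMask and the inline logic), each HOP operand is described as a selection of 128-bit lanes of (X, Y), the second mask is commuted when the operands reference X and Y in swapped order, and the two 2-element masks are concatenated into the 4-element mask applied to HOP(X, Y):

#include <vector>

// Lane indices for 256-bit vectors: 0-1 select the lanes of X, 2-3 the lanes of Y.
using LaneMask = std::vector<int>;

// Stand-in for ShuffleVectorSDNode::commuteMask on a 2-element mask:
// after swapping the two sources, X and Y lane indices trade places.
void commuteLaneMask(LaneMask &M) {
  for (int &I : M)
    I = (I < 2) ? I + 2 : I - 2;
}

// Mask0/Mask1 describe the two HOP operands as 128-bit lane selections of
// (X, Y); the result is the mask for the final v4f64/v4i64 shuffle of HOP(X, Y).
LaneMask combineHopMasks(LaneMask Mask0, LaneMask Mask1, bool OperandsCommuted) {
  if (OperandsCommuted) // the (Op00 == Op11 && Op01 == Op10) case above
    commuteLaneMask(Mask1);
  Mask0.insert(Mask0.end(), Mask1.begin(), Mask1.end());
  return Mask0; // e.g. {0,2} + {1,3} -> {0,2,1,3}, the vpermpd mask in the tests below
}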

llvm/test/CodeGen/X86/haddsub-2.ll

Lines changed: 24 additions & 12 deletions

@@ -444,12 +444,18 @@ define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
 ; SSE-NEXT: movapd %xmm2, %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: avx_vhadd_pd_test:
-; AVX: # %bb.0:
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: avx_vhadd_pd_test:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: avx_vhadd_pd_test:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
   %vecext = extractelement <4 x double> %A, i32 0
   %vecext1 = extractelement <4 x double> %A, i32 1
   %add = fadd double %vecext, %vecext1
@@ -477,12 +483,18 @@ define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
 ; SSE-NEXT: movapd %xmm2, %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: avx_vhsub_pd_test:
-; AVX: # %bb.0:
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vhsubpd %ymm2, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: avx_vhsub_pd_test:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vhsubpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: avx_vhsub_pd_test:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
   %vecext = extractelement <4 x double> %A, i32 0
   %vecext1 = extractelement <4 x double> %A, i32 1
   %sub = fsub double %vecext, %vecext1
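
For avx_vhadd_pd_test the two sequences agree: vhaddpd %ymm1, %ymm0 yields [A0+A1, B0+B1, A2+A3, B2+B3], and the vpermpd with mask [0,2,1,3] reorders that to [A0+A1, A2+A3, B0+B1, B2+B3], matching what the AVX1 vperm2f128 + vinsertf128 + vhaddpd sequence produced. A small AVX2 intrinsics check of this equivalence (a sketch, not part of the commit; compile with AVX2 enabled, e.g. -mavx2):

#include <immintrin.h>
#include <cassert>

int main() {
  __m256d A = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
  __m256d B = _mm256_setr_pd(10.0, 20.0, 30.0, 40.0);

  // vhaddpd %ymm1, %ymm0, %ymm0: per-128-bit-lane horizontal adds of A and B.
  __m256d H = _mm256_hadd_pd(A, B);            // [A0+A1, B0+B1, A2+A3, B2+B3]

  // vpermpd ymm0 = ymm0[0,2,1,3]: imm8 0xD8 selects elements 0, 2, 1, 3.
  __m256d R = _mm256_permute4x64_pd(H, 0xD8);  // [A0+A1, A2+A3, B0+B1, B2+B3]

  double Out[4];
  _mm256_storeu_pd(Out, R);
  assert(Out[0] == 3.0 && Out[1] == 7.0 && Out[2] == 30.0 && Out[3] == 70.0);
  return 0;
}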

llvm/test/CodeGen/X86/haddsub-undef.ll

Lines changed: 27 additions & 14 deletions

@@ -4,7 +4,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512,AVX512-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512,AVX512-FAST
 
 ; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
 
@@ -1190,13 +1190,20 @@ define <4 x double> @PR34724_add_v4f64_u123(<4 x double> %0, <4 x double> %1) {
 ; AVX-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-SLOW-NEXT: retq
 ;
-; AVX-FAST-LABEL: PR34724_add_v4f64_u123:
-; AVX-FAST: # %bb.0:
-; AVX-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-FAST-NEXT: vblendpd {{.*#+}} ymm2 = ymm0[0,1],ymm1[2,3]
-; AVX-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-FAST-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
-; AVX-FAST-NEXT: retq
+; AVX1-FAST-LABEL: PR34724_add_v4f64_u123:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vblendpd {{.*#+}} ymm2 = ymm0[0,1],ymm1[2,3]
+; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX512-FAST-LABEL: PR34724_add_v4f64_u123:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,0,3]
+; AVX512-FAST-NEXT: retq
   %3 = shufflevector <4 x double> %0, <4 x double> %1, <2 x i32> <i32 2, i32 4>
   %4 = shufflevector <4 x double> %0, <4 x double> %1, <2 x i32> <i32 3, i32 5>
   %5 = fadd <2 x double> %3, %4
@@ -1286,12 +1293,18 @@ define <4 x double> @PR34724_add_v4f64_01u3(<4 x double> %0, <4 x double> %1) {
 ; AVX-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-SLOW-NEXT: retq
 ;
-; AVX-FAST-LABEL: PR34724_add_v4f64_01u3:
-; AVX-FAST: # %bb.0:
-; AVX-FAST-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX-FAST-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX-FAST-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
-; AVX-FAST-NEXT: retq
+; AVX1-FAST-LABEL: PR34724_add_v4f64_01u3:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX1-FAST-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-FAST-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX512-FAST-LABEL: PR34724_add_v4f64_01u3:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,1,3]
+; AVX512-FAST-NEXT: retq
   %3 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 0, i32 2>
   %4 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 1, i32 3>
   %5 = fadd <2 x double> %3, %4
