15 changes: 9 additions & 6 deletions llvm/lib/Target/X86/X86FixupInstTuning.cpp
@@ -222,8 +222,9 @@ bool X86FixupInstTuningPass::processInstruction(
return ProcessUNPCKToIntDomain(NewOpc);
};

-  auto ProcessBLENDToMOV = [&](unsigned MovOpc) -> bool {
-    if (MI.getOperand(NumOperands - 1).getImm() != 1)
+  auto ProcessBLENDToMOV = [&](unsigned MovOpc, unsigned Mask,
+                               unsigned MovImm) -> bool {
+    if ((MI.getOperand(NumOperands - 1).getImm() & Mask) != MovImm)
return false;
bool Force = MF.getFunction().hasOptSize();
if (!Force && !NewOpcPreferable(MovOpc))
@@ -235,14 +236,16 @@ bool X86FixupInstTuningPass::processInstruction(

switch (Opc) {
case X86::BLENDPDrri:
-    return ProcessBLENDToMOV(X86::MOVSDrr);
+    return ProcessBLENDToMOV(X86::MOVSDrr, 0x3, 0x1);
case X86::VBLENDPDrri:
-    return ProcessBLENDToMOV(X86::VMOVSDrr);
+    return ProcessBLENDToMOV(X86::VMOVSDrr, 0x3, 0x1);

case X86::BLENDPSrri:
-    return ProcessBLENDToMOV(X86::MOVSSrr);
+    return ProcessBLENDToMOV(X86::MOVSSrr, 0xF, 0x1) ||
+           ProcessBLENDToMOV(X86::MOVSDrr, 0xF, 0x3);
case X86::VBLENDPSrri:
-    return ProcessBLENDToMOV(X86::VMOVSSrr);
+    return ProcessBLENDToMOV(X86::VMOVSSrr, 0xF, 0x1) ||
+           ProcessBLENDToMOV(X86::VMOVSDrr, 0xF, 0x3);

case X86::VPERMILPDri:
return ProcessVPERMILPDri(X86::VSHUFPDrri);
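The new Mask/MovImm pair encodes which blend immediates are pure element moves. A minimal standalone sketch of the lane semantics (an illustrative scalar model, not code from the pass) shows why BLENDPS imm 0x1 is MOVSS and why the newly handled BLENDPS imm 0x3 is MOVSD: selecting the two low floats moves one aligned 64-bit lane.

```cpp
// Scalar model of the 4 x f32 lane semantics (illustration only).
#include <array>
#include <cassert>

using V4 = std::array<float, 4>;

// blendps: bit i of the immediate picks element i from src2, else src1.
V4 blendps(const V4 &src1, const V4 &src2, unsigned imm) {
  V4 r;
  for (int i = 0; i < 4; ++i)
    r[i] = ((imm >> i) & 1) ? src2[i] : src1[i];
  return r;
}

// movss rr: replaces only element 0; movsd rr: replaces the low 64 bits,
// i.e. elements 0 and 1 of the f32 view.
V4 movss(const V4 &src1, const V4 &src2) {
  return {src2[0], src1[1], src1[2], src1[3]};
}
V4 movsd(const V4 &src1, const V4 &src2) {
  return {src2[0], src2[1], src1[2], src1[3]};
}

int main() {
  V4 a{0, 1, 2, 3}, b{4, 5, 6, 7};
  assert(blendps(a, b, 0x1) == movss(a, b)); // existing fold: MovImm 0x1
  assert(blendps(a, b, 0x3) == movsd(a, b)); // new fold: MovImm 0x3
}
```

The Mask argument (0x3 for two-lane blends, 0xF for four-lane ones) restricts the comparison to the defined immediate bits, which is why the lambda now takes both a Mask and a MovImm.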
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/avx-insertelt.ll
@@ -111,7 +111,7 @@ define <4 x double> @insert_f64_firstelt_of_high_subvector(<4 x double> %x, doub
; AVX-LABEL: insert_f64_firstelt_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = xmm1[0],xmm2[1]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
;
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -300,8 +300,8 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse41_blendpd:
; CHECK: # %bb.0:
-; CHECK-NEXT: vblendps $3, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
-; CHECK-NEXT: # xmm0 = xmm0[0,1],xmm1[2,3]
+; CHECK-NEXT: vmovsd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf3,0x10,0xc0]
+; CHECK-NEXT: # xmm0 = xmm0[0],xmm1[1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
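Note the IR here calls the intrinsic with immediate 2 (take element 1 from %a1), not 1, yet it still folds: the backend can commute the blend, which flips the two-bit mask from 2 to 1, and mask 1 is exactly the MOVSD pattern. A scalar sketch of that equivalence (illustration only, hypothetical helper names):

```cpp
#include <array>
#include <cassert>

using V2 = std::array<double, 2>;

// blendpd: bit i of the immediate picks element i from src2, else src1.
V2 blendpd(const V2 &src1, const V2 &src2, unsigned imm) {
  return {(imm & 1) ? src2[0] : src1[0], (imm & 2) ? src2[1] : src1[1]};
}

// movsd rr: element 0 from src2, element 1 from src1.
V2 movsd(const V2 &src1, const V2 &src2) { return {src2[0], src1[1]}; }

int main() {
  V2 a{1.0, 2.0}, b{3.0, 4.0};
  // Commuting the operands flips the mask: 0b10 -> 0b01 ...
  assert(blendpd(a, b, 2) == blendpd(b, a, 1));
  // ... and mask 1 is exactly the MOVSD register form.
  assert(blendpd(b, a, 1) == movsd(b, a));
}
```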
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/coalesce_commute_movsd.ll
@@ -19,12 +19,12 @@ define <2 x double> @insert_f64(double %a0, <2 x double> %a1) {
;
; AVX-LABEL: insert_f64:
; AVX: # %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
;
; AVX512-LABEL: insert_f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: retq
%1 = insertelement <2 x double> %a1, double %a0, i32 0
ret <2 x double> %1
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/combine-and.ll
@@ -127,7 +127,7 @@ define <4 x i32> @test7(<4 x i32> %A) {
; SSE-LABEL: test7:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: test7:
175 changes: 70 additions & 105 deletions llvm/test/CodeGen/X86/combine-or-shuffle.ll
@@ -31,15 +31,10 @@ define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {


define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test2:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test2:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test2:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test2:
; AVX: # %bb.0:
@@ -53,15 +48,10 @@ define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {


define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
-; SSE2-LABEL: test3:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test3:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test3:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test3:
; AVX: # %bb.0:
@@ -201,15 +191,10 @@ define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {


define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test9:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test9:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test9:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test9:
; AVX: # %bb.0:
@@ -223,15 +208,10 @@ define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {


define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
-; SSE2-LABEL: test10:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test10:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test10:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test10:
; AVX: # %bb.0:
@@ -563,20 +543,25 @@ define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
; bitcast to use the mask-or blend combine.

define <2 x double> @test22(<2 x double> %a0, <2 x double> %a1) {
-; SSE2-LABEL: test22:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
+; SSE-LABEL: test22:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
 ;
-; SSE4-LABEL: test22:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; AVX1-LABEL: test22:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT: retq
 ;
-; AVX-LABEL: test22:
-; AVX: # %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX-NEXT: retq
+; AVX2-LABEL: test22:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test22:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
Review thread on lines +561 to +563:

Contributor: Why has this not been replaced?

Collaborator (author): It shows the scheduler checks are working :) We use -mcpu=x86-64-v4, and the skylakeserver model prefers vblendps to vmovsd. I can replace the -mcpu with a raw -mattr if you prefer?

Contributor: I see. I think it's better to keep it as is to show the coverage.
+; AVX512-NEXT: retq
%bc1 = bitcast <2 x double> %a0 to <2 x i64>
%bc2 = bitcast <2 x double> %a1 to <2 x i64>
%and1 = and <2 x i64> %bc1, <i64 0, i64 -1>
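The review thread above concerns the NewOpcPreferable gate in the pass: outside of optsize, the rewrite only fires when the target's scheduler model rates the MOV at least as well as the blend, and the skylake-server model behind -mcpu=x86-64-v4 prefers vblendps, so the AVX512 checks intentionally keep it. A hypothetical distillation of that gate follows; the struct, names, and comparison order are assumptions for illustration, not the pass's real implementation, which queries the MC scheduler model.

```cpp
// Hypothetical sketch of the tuning gate; OpcSched and the comparison
// order are assumptions, not LLVM API.
struct OpcSched {
  double RThroughput; // reciprocal throughput, lower is better
  double Latency;     // cycles, lower is better
};

bool shouldRewriteToMov(const OpcSched &Blend, const OpcSched &Mov,
                        bool HasOptSize) {
  if (HasOptSize)
    return true; // forced: e.g. 4-byte vmovsd vs 6-byte vblendps+imm8 above
  if (Mov.RThroughput != Blend.RThroughput)
    return Mov.RThroughput < Blend.RThroughput;
  return Mov.Latency <= Blend.Latency;
}
```

The commute-blend-sse41.ll change further down shows the forced side of this gate: @baz is marked optsize, so the blend is rewritten even without a scheduler win.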
@@ -614,20 +599,25 @@ define <4 x float> @test23(<4 x float> %a0, <4 x float> %a1) {


define <4 x float> @test24(<4 x float> %a0, <4 x float> %a1) {
-; SSE2-LABEL: test24:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
+; SSE-LABEL: test24:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
 ;
-; SSE4-LABEL: test24:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; AVX1-LABEL: test24:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT: retq
 ;
-; AVX-LABEL: test24:
-; AVX: # %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX-NEXT: retq
+; AVX2-LABEL: test24:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test24:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512-NEXT: retq
%bc1 = bitcast <4 x float> %a0 to <2 x i64>
%bc2 = bitcast <4 x float> %a1 to <2 x i64>
%and1 = and <2 x i64> %bc1, <i64 0, i64 -1>
@@ -707,15 +697,10 @@ define <4 x i8> @test_crash(<4 x i8> %a, <4 x i8> %b) {
; Verify that we can fold regardless of which operand is the zeroinitializer

define <4 x i32> @test2b(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test2b:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test2b:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test2b:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test2b:
; AVX: # %bb.0:
@@ -728,15 +713,10 @@ define <4 x i32> @test2b(<4 x i32> %a, <4 x i32> %b) {
}

define <4 x i32> @test2c(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test2c:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test2c:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test2c:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test2c:
; AVX: # %bb.0:
@@ -750,15 +730,10 @@ define <4 x i32> @test2c(<4 x i32> %a, <4 x i32> %b) {


define <4 x i32> @test2d(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test2d:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test2d:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test2d:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test2d:
; AVX: # %bb.0:
@@ -773,15 +748,10 @@ define <4 x i32> @test2d(<4 x i32> %a, <4 x i32> %b) {
; Make sure we can have an undef where an index pointing to the zero vector should be

define <4 x i32> @test2e(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test2e:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test2e:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test2e:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test2e:
; AVX: # %bb.0:
@@ -794,15 +764,10 @@ define <4 x i32> @test2e(<4 x i32> %a, <4 x i32> %b) {
}

define <4 x i32> @test2f(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: test2f:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test2f:
-; SSE4: # %bb.0:
-; SSE4-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; SSE4-NEXT: retq
+; SSE-LABEL: test2f:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE-NEXT: retq
;
; AVX-LABEL: test2f:
; AVX: # %bb.0:
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/commute-blend-sse41.ll
@@ -57,7 +57,7 @@ define void @baz(ptr %arg, ptr %arg1) optsize {
; CHECK-NEXT: movaps (%rdi), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [3,3]
; CHECK-NEXT: andps %xmm0, %xmm1
-; CHECK-NEXT: blendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; CHECK-NEXT: movups %xmm1, (%rsi)
; CHECK-NEXT: retq
bb:
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/horizontal-sum.ll
@@ -577,7 +577,7 @@ define <4 x float> @sequential_sum_v4f32_v4f32(<4 x float> %0, <4 x float> %1, <
; AVX-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,2],xmm1[0,1]
; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[3,3]
-; AVX-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
+; AVX-SLOW-NEXT: vmovsd {{.*#+}} xmm2 = xmm5[0],xmm2[1]
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX-SLOW-NEXT: vaddps %xmm3, %xmm4, %xmm4
; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
@@ -596,7 +596,7 @@ define <4 x float> @sequential_sum_v4f32_v4f32(<4 x float> %0, <4 x float> %1, <
; AVX-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm1
; AVX-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,2],xmm1[0,1]
; AVX-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[3,3]
-; AVX-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
+; AVX-FAST-NEXT: vmovsd {{.*#+}} xmm2 = xmm5[0],xmm2[1]
; AVX-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm4
; AVX-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
; AVX-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[2]