Commit a4a9803

[X86] shouldFoldMaskToVariableShiftPair should be true for scalars up to the biggest legal type (#158068)
For X86, we only want to do this for scalars up to the widest legal integer type; allowing it for wider types results in bloated code.
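
For context, this hook gates the DAG combine exercised by the test below: rewriting the mask form x & (-1 << y) into the variable shift pair (x >> y) << y. A minimal standalone sketch of that scalar equivalence in plain C++ (the helper names are illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

// x & (-1 << y): clear the low y bits of x (the mask form).
static uint32_t mask_form(uint32_t x, unsigned y) {
  return x & (~UINT32_C(0) << y);
}

// (x >> y) << y: the same result, expressed as a variable shift pair.
static uint32_t shift_pair_form(uint32_t x, unsigned y) {
  return (x >> y) << y;
}

int main() {
  // The two forms agree for every in-range shift amount.
  for (unsigned y = 0; y < 32; ++y)
    assert(mask_form(0xDEADBEEFu, y) == shift_pair_form(0xDEADBEEFu, y));
  return 0;
}

When the scalar fits in a register, the shift pair avoids materializing the -1 constant and shifting it; when it is wider than a register, legalization has to expand the variable shift pair, which is the bloat the commit message refers to.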
1 parent deef49e · commit a4a9803

File tree

2 files changed: 214 additions, 5 deletions


llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 2 additions & 5 deletions
@@ -3659,11 +3659,8 @@ bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
   if (VT.isVector())
     return false;

-  // 64-bit shifts on 32-bit targets produce really bad bloated code.
-  if (VT == MVT::i64 && !Subtarget.is64Bit())
-    return false;
-
-  return true;
+  unsigned MaxWidth = Subtarget.is64Bit() ? 64 : 32;
+  return VT.getScalarSizeInBits() <= MaxWidth;
 }

 TargetLowering::ShiftLegalizationStrategy
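
Restated outside of LLVM, the new predicate accepts any scalar no wider than the target's native GPR width. The sketch below is a standalone illustration of that rule, not the actual TargetLowering hook (whose real signature takes an SDValue); it spells out the widths the accompanying test covers:

#include <cassert>

// Illustrative restatement: fold the mask into a variable shift pair only
// for scalar widths up to the native register width (32 or 64 bits).
static bool shouldFoldMaskToVariableShiftPair(unsigned ScalarBits, bool Is64Bit) {
  unsigned MaxWidth = Is64Bit ? 64 : 32;
  return ScalarBits <= MaxWidth;
}

int main() {
  assert(shouldFoldMaskToVariableShiftPair(32, /*Is64Bit=*/false));   // i32 on i686: fold
  assert(shouldFoldMaskToVariableShiftPair(64, /*Is64Bit=*/true));    // i64 on x86-64: fold
  assert(!shouldFoldMaskToVariableShiftPair(64, /*Is64Bit=*/false));  // i64 on i686: keep the mask
  assert(!shouldFoldMaskToVariableShiftPair(128, /*Is64Bit=*/true));  // i128: keep the mask
  return 0;
}

This matches the mask_pair_64 and mask_pair_128 checks in the new test: the 64-bit shift pair appears only in the x86-64 runs, and the 128-bit case keeps the shifted-mask-plus-and sequence everywhere.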
New test file — Lines changed: 212 additions & 0 deletions
@@ -0,0 +1,212 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86-NOBMI
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86-BMI2
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86-BMI2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64-NOBMI
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64-BMI2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64-BMI2

define i32 @mask_pair(i32 %x, i32 %y) nounwind {
; X86-NOBMI-LABEL: mask_pair:
; X86-NOBMI: # %bb.0:
; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOBMI-NEXT: shrl %cl, %eax
; X86-NOBMI-NEXT: shll %cl, %eax
; X86-NOBMI-NEXT: retl
;
; X86-BMI2-LABEL: mask_pair:
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI-LABEL: mask_pair:
; X64-NOBMI: # %bb.0:
; X64-NOBMI-NEXT: movl %esi, %ecx
; X64-NOBMI-NEXT: movl %edi, %eax
; X64-NOBMI-NEXT: shrl %cl, %eax
; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NOBMI-NEXT: shll %cl, %eax
; X64-NOBMI-NEXT: retq
;
; X64-BMI2-LABEL: mask_pair:
; X64-BMI2: # %bb.0:
; X64-BMI2-NEXT: shrxl %esi, %edi, %eax
; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
; X64-BMI2-NEXT: retq
  %shl = shl nsw i32 -1, %y
  %and = and i32 %shl, %x
  ret i32 %and
}

define i64 @mask_pair_64(i64 %x, i64 %y) nounwind {
; X86-NOBMI-LABEL: mask_pair_64:
; X86-NOBMI: # %bb.0:
; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NOBMI-NEXT: movl $-1, %edx
; X86-NOBMI-NEXT: movl $-1, %eax
; X86-NOBMI-NEXT: shll %cl, %eax
; X86-NOBMI-NEXT: testb $32, %cl
; X86-NOBMI-NEXT: je .LBB1_2
; X86-NOBMI-NEXT: # %bb.1:
; X86-NOBMI-NEXT: movl %eax, %edx
; X86-NOBMI-NEXT: xorl %eax, %eax
; X86-NOBMI-NEXT: .LBB1_2:
; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-NOBMI-NEXT: retl
;
; X86-BMI2-LABEL: mask_pair_64:
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT: movl $-1, %edx
; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
; X86-BMI2-NEXT: testb $32, %cl
; X86-BMI2-NEXT: je .LBB1_2
; X86-BMI2-NEXT: # %bb.1:
; X86-BMI2-NEXT: movl %eax, %edx
; X86-BMI2-NEXT: xorl %eax, %eax
; X86-BMI2-NEXT: .LBB1_2:
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT: retl
;
; X64-NOBMI-LABEL: mask_pair_64:
; X64-NOBMI: # %bb.0:
; X64-NOBMI-NEXT: movq %rsi, %rcx
; X64-NOBMI-NEXT: movq %rdi, %rax
; X64-NOBMI-NEXT: shrq %cl, %rax
; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $rcx
; X64-NOBMI-NEXT: shlq %cl, %rax
; X64-NOBMI-NEXT: retq
;
; X64-BMI2-LABEL: mask_pair_64:
; X64-BMI2: # %bb.0:
; X64-BMI2-NEXT: shrxq %rsi, %rdi, %rax
; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
; X64-BMI2-NEXT: retq
  %shl = shl nsw i64 -1, %y
  %and = and i64 %shl, %x
  ret i64 %and
}

define i128 @mask_pair_128(i128 %x, i128 %y) nounwind {
; X86-NOBMI-LABEL: mask_pair_128:
; X86-NOBMI: # %bb.0:
; X86-NOBMI-NEXT: pushl %ebx
; X86-NOBMI-NEXT: pushl %edi
; X86-NOBMI-NEXT: pushl %esi
; X86-NOBMI-NEXT: subl $32, %esp
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, (%esp)
; X86-NOBMI-NEXT: movl %ecx, %edx
; X86-NOBMI-NEXT: shrb $3, %dl
; X86-NOBMI-NEXT: andb $12, %dl
; X86-NOBMI-NEXT: negb %dl
; X86-NOBMI-NEXT: movsbl %dl, %ebx
; X86-NOBMI-NEXT: movl 24(%esp,%ebx), %edx
; X86-NOBMI-NEXT: movl 28(%esp,%ebx), %esi
; X86-NOBMI-NEXT: shldl %cl, %edx, %esi
; X86-NOBMI-NEXT: movl 16(%esp,%ebx), %edi
; X86-NOBMI-NEXT: movl 20(%esp,%ebx), %ebx
; X86-NOBMI-NEXT: shldl %cl, %ebx, %edx
; X86-NOBMI-NEXT: shldl %cl, %edi, %ebx
; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NOBMI-NEXT: shll %cl, %edi
; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi
; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edi
; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %ebx
; X86-NOBMI-NEXT: movl %esi, 12(%eax)
; X86-NOBMI-NEXT: movl %edx, 8(%eax)
; X86-NOBMI-NEXT: movl %ebx, 4(%eax)
; X86-NOBMI-NEXT: movl %edi, (%eax)
; X86-NOBMI-NEXT: addl $32, %esp
; X86-NOBMI-NEXT: popl %esi
; X86-NOBMI-NEXT: popl %edi
; X86-NOBMI-NEXT: popl %ebx
; X86-NOBMI-NEXT: retl $4
;
; X86-BMI2-LABEL: mask_pair_128:
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: pushl %ebx
; X86-BMI2-NEXT: pushl %edi
; X86-BMI2-NEXT: pushl %esi
; X86-BMI2-NEXT: subl $32, %esp
; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, (%esp)
; X86-BMI2-NEXT: movl %ecx, %edx
; X86-BMI2-NEXT: shrb $3, %dl
; X86-BMI2-NEXT: andb $12, %dl
; X86-BMI2-NEXT: negb %dl
; X86-BMI2-NEXT: movsbl %dl, %edi
; X86-BMI2-NEXT: movl 24(%esp,%edi), %edx
; X86-BMI2-NEXT: movl 28(%esp,%edi), %esi
; X86-BMI2-NEXT: shldl %cl, %edx, %esi
; X86-BMI2-NEXT: movl 16(%esp,%edi), %ebx
; X86-BMI2-NEXT: movl 20(%esp,%edi), %edi
; X86-BMI2-NEXT: shldl %cl, %edi, %edx
; X86-BMI2-NEXT: shldl %cl, %ebx, %edi
; X86-BMI2-NEXT: shlxl %ecx, %ebx, %ecx
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi
; X86-BMI2-NEXT: movl %esi, 12(%eax)
; X86-BMI2-NEXT: movl %edx, 8(%eax)
; X86-BMI2-NEXT: movl %edi, 4(%eax)
; X86-BMI2-NEXT: movl %ecx, (%eax)
; X86-BMI2-NEXT: addl $32, %esp
; X86-BMI2-NEXT: popl %esi
; X86-BMI2-NEXT: popl %edi
; X86-BMI2-NEXT: popl %ebx
; X86-BMI2-NEXT: retl $4
;
; X64-NOBMI-LABEL: mask_pair_128:
; X64-NOBMI: # %bb.0:
; X64-NOBMI-NEXT: movq %rdx, %rcx
; X64-NOBMI-NEXT: movq $-1, %rdx
; X64-NOBMI-NEXT: movq $-1, %r8
; X64-NOBMI-NEXT: shlq %cl, %r8
; X64-NOBMI-NEXT: xorl %eax, %eax
; X64-NOBMI-NEXT: testb $64, %cl
; X64-NOBMI-NEXT: cmovneq %r8, %rdx
; X64-NOBMI-NEXT: cmoveq %r8, %rax
; X64-NOBMI-NEXT: andq %rdi, %rax
; X64-NOBMI-NEXT: andq %rsi, %rdx
; X64-NOBMI-NEXT: retq
;
; X64-BMI2-LABEL: mask_pair_128:
; X64-BMI2: # %bb.0:
; X64-BMI2-NEXT: movq $-1, %rcx
; X64-BMI2-NEXT: shlxq %rdx, %rcx, %r8
; X64-BMI2-NEXT: xorl %eax, %eax
; X64-BMI2-NEXT: testb $64, %dl
; X64-BMI2-NEXT: cmovneq %r8, %rcx
; X64-BMI2-NEXT: cmoveq %r8, %rax
; X64-BMI2-NEXT: andq %rdi, %rax
; X64-BMI2-NEXT: andq %rsi, %rcx
; X64-BMI2-NEXT: movq %rcx, %rdx
; X64-BMI2-NEXT: retq
  %shl = shl nsw i128 -1, %y
  %and = and i128 %shl, %x
  ret i128 %and
}
