Commit 956ae7c

[X86] combine-addo.ll - add common CHECK prefix

1 parent ee1a06b
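
Background on the change (not part of the commit itself): FileCheck's --check-prefixes option accepts a comma-separated list, so a RUN line using --check-prefixes=CHECK,SSE matches both CHECK- and SSE-prefixed directives. When the SSE and AVX output for a function is identical, update_llc_test_checks.py can then emit a single common CHECK block instead of duplicating it under each prefix, which is what this commit enables for combine-addo.ll. A minimal sketch of the pattern, using a hypothetical test function for illustration:

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX

    ; Codegen is identical for both RUN lines, so one common CHECK block suffices.
    define i32 @identity(i32 %a0) {
    ; CHECK-LABEL: identity:
    ; CHECK:       # %bb.0:
    ; CHECK-NEXT:    movl %edi, %eax
    ; CHECK-NEXT:    retq
      ret i32 %a0
    }

The check lines themselves are normally regenerated by running llvm/utils/update_llc_test_checks.py on the test file rather than edited by hand.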


llvm/test/CodeGen/X86/combine-addo.ll

Lines changed: 30 additions & 64 deletions
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX
 
 declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
 declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
@@ -10,15 +10,10 @@ declare {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32>, <4 x i32
 
 ; fold (sadd x, 0) -> x
 define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_sadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_sadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_sadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
 %1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
 %2 = extractvalue {i32, i1} %1, 0
 %3 = extractvalue {i32, i1} %1, 1
@@ -27,13 +22,9 @@ define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
 }
 
 define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE-LABEL: combine_vec_sadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_vec_sadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_vec_sadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
 %1 = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
 %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
 %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
@@ -43,15 +34,10 @@ define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
 
 ; fold (uadd x, 0) -> x
 define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_uadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_uadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_uadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
 %1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
 %2 = extractvalue {i32, i1} %1, 0
 %3 = extractvalue {i32, i1} %1, 1
@@ -60,13 +46,9 @@ define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
 }
 
 define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE-LABEL: combine_vec_uadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_vec_uadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_vec_uadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
 %1 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
 %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
 %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
@@ -76,19 +58,12 @@ define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
 
 ; fold (uadd (xor a, -1), 1) -> (usub 0, a) and flip carry
 define i32 @combine_uadd_not(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_uadd_not:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: negl %eax
-; SSE-NEXT: cmovael %esi, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_uadd_not:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: negl %eax
-; AVX-NEXT: cmovael %esi, %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_uadd_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: negl %eax
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: retq
 %1 = xor i32 %a0, -1
 %2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 1)
 %3 = extractvalue {i32, i1} %2, 0
@@ -128,23 +103,14 @@ define <4 x i32> @combine_vec_uadd_not(<4 x i32> %a0, <4 x i32> %a1) {
 
 ; if uaddo never overflows, replace with add
 define i32 @combine_uadd_no_overflow(i32 %a0, i32 %a1, i32 %a2) {
-; SSE-LABEL: combine_uadd_no_overflow:
-; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $edx killed $edx def $rdx
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: shrl $16, %esi
-; SSE-NEXT: shrl $16, %edx
-; SSE-NEXT: leal (%rdx,%rsi), %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_uadd_no_overflow:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $edx killed $edx def $rdx
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: shrl $16, %esi
-; AVX-NEXT: shrl $16, %edx
-; AVX-NEXT: leal (%rdx,%rsi), %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_uadd_no_overflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: shrl $16, %esi
+; CHECK-NEXT: shrl $16, %edx
+; CHECK-NEXT: leal (%rdx,%rsi), %eax
+; CHECK-NEXT: retq
 %1 = lshr i32 %a1, 16
 %2 = lshr i32 %a2, 16
 %3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 %2)
