Skip to content

Commit dfae162

Browse files
committed
[SLP] Update test naming to avoid FileCheck warnings
This only addresses the tmp name collision. We still get warnings due to conflicting ASM. This is due to the different target attributes on the function.
1 parent c098f2d commit dfae162

File tree

1 file changed

+44
-44
lines changed

1 file changed

+44
-44
lines changed

llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll

Lines changed: 44 additions & 44 deletions
Original file line number | Diff line number | Diff line change
@@ -20,13 +20,13 @@ define i8 @PR31243_zext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
2020
; SSE-NEXT: [[TMP0:%.*]] = or i8 [[V0:%.*]], 1
2121
; SSE-NEXT: [[TMP1:%.*]] = or i8 [[V1:%.*]], 1
2222
; SSE-NEXT: [[TMP2:%.*]] = zext i8 [[TMP0]] to i64
23-
; SSE-NEXT: [[TMP_4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
23+
; SSE-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
2424
; SSE-NEXT: [[TMP3:%.*]] = zext i8 [[TMP1]] to i64
25-
; SSE-NEXT: [[TMP_5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
26-
; SSE-NEXT: [[TMP_6:%.*]] = load i8, ptr [[TMP_4]], align 1
27-
; SSE-NEXT: [[TMP_7:%.*]] = load i8, ptr [[TMP_5]], align 1
28-
; SSE-NEXT: [[TMP_8:%.*]] = add i8 [[TMP_6]], [[TMP_7]]
29-
; SSE-NEXT: ret i8 [[TMP_8]]
25+
; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
26+
; SSE-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
27+
; SSE-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
28+
; SSE-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
29+
; SSE-NEXT: ret i8 [[T8]]
3030
;
3131
; AVX-LABEL: @PR31243_zext(
3232
; AVX-NEXT: entry:
@@ -35,26 +35,26 @@ define i8 @PR31243_zext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
3535
; AVX-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
3636
; AVX-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
3737
; AVX-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
38-
; AVX-NEXT: [[TMP_4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
38+
; AVX-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
3939
; AVX-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
4040
; AVX-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
41-
; AVX-NEXT: [[TMP_5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
42-
; AVX-NEXT: [[TMP_6:%.*]] = load i8, ptr [[TMP_4]], align 1
43-
; AVX-NEXT: [[TMP_7:%.*]] = load i8, ptr [[TMP_5]], align 1
44-
; AVX-NEXT: [[TMP_8:%.*]] = add i8 [[TMP_6]], [[TMP_7]]
45-
; AVX-NEXT: ret i8 [[TMP_8]]
41+
; AVX-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
42+
; AVX-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
43+
; AVX-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
44+
; AVX-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
45+
; AVX-NEXT: ret i8 [[T8]]
4646
;
4747
entry:
48-
%tmp_0 = zext i8 %v0 to i32
49-
%tmp_1 = zext i8 %v1 to i32
50-
%tmp_2 = or i32 %tmp_0, 1
51-
%tmp_3 = or i32 %tmp_1, 1
52-
%tmp_4 = getelementptr inbounds i8, ptr %ptr, i32 %tmp_2
53-
%tmp_5 = getelementptr inbounds i8, ptr %ptr, i32 %tmp_3
54-
%tmp_6 = load i8, ptr %tmp_4
55-
%tmp_7 = load i8, ptr %tmp_5
56-
%tmp_8 = add i8 %tmp_6, %tmp_7
57-
ret i8 %tmp_8
48+
%t0 = zext i8 %v0 to i32
49+
%t1 = zext i8 %v1 to i32
50+
%t2 = or i32 %t0, 1
51+
%t3 = or i32 %t1, 1
52+
%t4 = getelementptr inbounds i8, ptr %ptr, i32 %t2
53+
%t5 = getelementptr inbounds i8, ptr %ptr, i32 %t3
54+
%t6 = load i8, ptr %t4
55+
%t7 = load i8, ptr %t5
56+
%t8 = add i8 %t6, %t7
57+
ret i8 %t8
5858
}
5959

6060
; When computing minimum sizes, if we cannot prove the sign bit is zero, we
@@ -76,13 +76,13 @@ define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
7676
; SSE-NEXT: [[TMP0:%.*]] = or i8 [[V0:%.*]], 1
7777
; SSE-NEXT: [[TMP1:%.*]] = or i8 [[V1:%.*]], 1
7878
; SSE-NEXT: [[TMP2:%.*]] = sext i8 [[TMP0]] to i64
79-
; SSE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
79+
; SSE-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
8080
; SSE-NEXT: [[TMP3:%.*]] = sext i8 [[TMP1]] to i64
81-
; SSE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
82-
; SSE-NEXT: [[TMP6:%.*]] = load i8, ptr [[TMP4]], align 1
83-
; SSE-NEXT: [[TMP7:%.*]] = load i8, ptr [[TMP5]], align 1
84-
; SSE-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
85-
; SSE-NEXT: ret i8 [[TMP8]]
81+
; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
82+
; SSE-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
83+
; SSE-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
84+
; SSE-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
85+
; SSE-NEXT: ret i8 [[T8]]
8686
;
8787
; AVX-LABEL: @PR31243_sext(
8888
; AVX-NEXT: entry:
@@ -92,24 +92,24 @@ define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
9292
; AVX-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i16>
9393
; AVX-NEXT: [[TMP4:%.*]] = extractelement <2 x i16> [[TMP3]], i64 0
9494
; AVX-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i64
95-
; AVX-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP5]]
95+
; AVX-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP5]]
9696
; AVX-NEXT: [[TMP6:%.*]] = extractelement <2 x i16> [[TMP3]], i64 1
9797
; AVX-NEXT: [[TMP7:%.*]] = sext i16 [[TMP6]] to i64
98-
; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP7]]
99-
; AVX-NEXT: [[TMP6:%.*]] = load i8, ptr [[TMP4]], align 1
100-
; AVX-NEXT: [[TMP7:%.*]] = load i8, ptr [[TMP5]], align 1
101-
; AVX-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
102-
; AVX-NEXT: ret i8 [[TMP8]]
98+
; AVX-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP7]]
99+
; AVX-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
100+
; AVX-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
101+
; AVX-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
102+
; AVX-NEXT: ret i8 [[T8]]
103103
;
104104
entry:
105-
%tmp0 = sext i8 %v0 to i32
106-
%tmp1 = sext i8 %v1 to i32
107-
%tmp2 = or i32 %tmp0, 1
108-
%tmp3 = or i32 %tmp1, 1
109-
%tmp4 = getelementptr inbounds i8, ptr %ptr, i32 %tmp2
110-
%tmp5 = getelementptr inbounds i8, ptr %ptr, i32 %tmp3
111-
%tmp6 = load i8, ptr %tmp4
112-
%tmp7 = load i8, ptr %tmp5
113-
%tmp8 = add i8 %tmp6, %tmp7
114-
ret i8 %tmp8
105+
%t0 = sext i8 %v0 to i32
106+
%t1 = sext i8 %v1 to i32
107+
%t2 = or i32 %t0, 1
108+
%t3 = or i32 %t1, 1
109+
%t4 = getelementptr inbounds i8, ptr %ptr, i32 %t2
110+
%t5 = getelementptr inbounds i8, ptr %ptr, i32 %t3
111+
%t6 = load i8, ptr %t4
112+
%t7 = load i8, ptr %t5
113+
%t8 = add i8 %t6, %t7
114+
ret i8 %t8
115115
}

0 commit comments

Comments
 (0)