@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------------
@@ -14,7 +15,7 @@ subroutine vec_cmpge_test_i8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmpge_test_i8
 
 ! CHECK-LABEL: vec_cmpge_test_i4
@@ -26,7 +27,7 @@ subroutine vec_cmpge_test_i4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmpge_test_i4
 
 ! CHECK-LABEL: vec_cmpge_test_i2
@@ -38,7 +39,7 @@ subroutine vec_cmpge_test_i2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmpge_test_i2
 
 ! CHECK-LABEL: vec_cmpge_test_i1
@@ -50,7 +51,7 @@ subroutine vec_cmpge_test_i1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmpge_test_i1
 
 ! CHECK-LABEL: vec_cmpge_test_u8
@@ -62,7 +63,7 @@ subroutine vec_cmpge_test_u8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmpge_test_u8
 
 ! CHECK-LABEL: vec_cmpge_test_u4
@@ -74,7 +75,7 @@ subroutine vec_cmpge_test_u4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmpge_test_u4
 
 ! CHECK-LABEL: vec_cmpge_test_u2
@@ -86,7 +87,7 @@ subroutine vec_cmpge_test_u2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmpge_test_u2
 
 ! CHECK-LABEL: vec_cmpge_test_u1
@@ -98,7 +99,7 @@ subroutine vec_cmpge_test_u1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmpge_test_u1
 
 subroutine vec_cmpge_test_r4(arg1, arg2)
@@ -248,7 +249,7 @@ subroutine vec_cmple_test_i8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmple_test_i8
 
 ! CHECK-LABEL: vec_cmple_test_i4
@@ -260,7 +261,7 @@ subroutine vec_cmple_test_i4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmple_test_i4
 
 ! CHECK-LABEL: vec_cmple_test_i2
@@ -272,7 +273,7 @@ subroutine vec_cmple_test_i2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmple_test_i2
 
 ! CHECK-LABEL: vec_cmple_test_i1
@@ -284,7 +285,7 @@ subroutine vec_cmple_test_i1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmple_test_i1
 
 ! CHECK-LABEL: vec_cmple_test_u8
@@ -296,7 +297,7 @@ subroutine vec_cmple_test_u8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmple_test_u8
 
 ! CHECK-LABEL: vec_cmple_test_u4
@@ -308,7 +309,7 @@ subroutine vec_cmple_test_u4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmple_test_u4
 
 ! CHECK-LABEL: vec_cmple_test_u2
@@ -320,7 +321,7 @@ subroutine vec_cmple_test_u2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmple_test_u2
 
 ! CHECK-LABEL: vec_cmple_test_u1
@@ -332,7 +333,7 @@ subroutine vec_cmple_test_u1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmple_test_u1
 
 ! CHECK-LABEL: vec_cmple_test_r4