; RUN: opt < %s -passes=msan -S | FileCheck %s
;
; Forked from llvm/test/CodeGen/AArch64/arm64-vaddlv.ll
- ;
- ; Currently handled (suboptimally) by handleUnknownInstruction:
- ; - llvm.aarch64.neon.saddlv
- ; - llvm.aarch64.neon.uaddlv

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-android9001"
@@ -16,15 +12,10 @@ define i64 @test_vaddlv_s32(<2 x i32> %a1) nounwind readnone #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
- ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to i64
- ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
- ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1:![0-9]+]]
- ; CHECK: 2:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3:[0-9]+]]
- ; CHECK-NEXT: unreachable
- ; CHECK: 3:
+ ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
+ ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[VADDLV_I:%.*]] = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> [[A1]]) #[[ATTR2:[0-9]+]]
- ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
+ ; CHECK-NEXT: store i64 [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i64 [[VADDLV_I]]
;
entry:
@@ -38,15 +29,10 @@ define i64 @test_vaddlv_u32(<2 x i32> %a1) nounwind readnone #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
- ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to i64
- ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
- ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
- ; CHECK: 2:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]]
- ; CHECK-NEXT: unreachable
- ; CHECK: 3:
+ ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
+ ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[VADDLV_I:%.*]] = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> [[A1]]) #[[ATTR2]]
- ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
+ ; CHECK-NEXT: store i64 [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i64 [[VADDLV_I]]
;
entry:
@@ -59,6 +45,3 @@ declare i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32>) nounwind readnone
declare i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32>) nounwind readnone

attributes #0 = { sanitize_memory }
- ;.
- ; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
- ;.
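For reference, here is a minimal sketch of the instrumented body the updated CHECK lines describe, written out as plain IR under the usual MSan TLS conventions; the function name and the %s* value names are illustrative, not from the commit. Instead of the old strict check (bitcast the shadow to i64, icmp, branch to __msan_warning_noreturn and store a zero result shadow), the operand's shadow is OR-reduced across lanes and zero-extended, so uninitialized bits propagate into the intrinsic's i64 result shadow.

@__msan_param_tls = external thread_local global [100 x i64]
@__msan_retval_tls = external thread_local global [100 x i64]

declare i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)

define i64 @sketch_vaddlv_s32(<2 x i32> %a1) sanitize_memory {
entry:
  ; load the shadow of %a1 from MSan's parameter TLS slot
  %s = load <2 x i32>, ptr @__msan_param_tls, align 8
  ; OR-reduce the lane shadows: any uninitialized lane taints the scalar result
  %s.or = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %s)
  ; widen the shadow to match the intrinsic's i64 result
  %s.ret = zext i32 %s.or to i64
  %vaddlv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a1)
  ; propagate the result shadow instead of checking and discarding it
  store i64 %s.ret, ptr @__msan_retval_tls, align 8
  ret i64 %vaddlv.i
}

The uaddlv test is instrumented identically; only the called intrinsic differs.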