Commit 4aa6abe

[msan] Fix llvm.abs.v intrinsic
The last argument of the intrinsic is a boolean flag to control INT_MIN handling and does not affect msan metadata.
1 parent 1fd9a14 commit 4aa6abe
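
For reference, the second operand of llvm.abs is an immediate i1 that only selects whether an INT_MIN input lane yields poison; it is not data, so MSan has no shadow to propagate or check for it. A minimal IR sketch of the intrinsic's use (illustrative only, not part of this commit; @abs_example is a made-up function name):

define <32 x i8> @abs_example(<32 x i8> %v) {
  ; The i1 flag is a compile-time constant: false means INT_MIN lanes wrap back
  ; to INT_MIN, true would make such lanes poison. Either way it carries no shadow.
  %r = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %v, i1 false)
  ret <32 x i8> %r
}

declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1 immarg)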

2 files changed: +29 -33 lines changed

llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

Lines changed: 5 additions & 0 deletions
@@ -2638,6 +2638,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       return false;
 
     unsigned NumArgOperands = I.getNumArgOperands();
+    if (I.getIntrinsicID() == Intrinsic::abs) {
+      assert(NumArgOperands == 2);
+      // The last argument is just a boolean flag.
+      NumArgOperands = 1;
+    }
 
     for (unsigned i = 0; i < NumArgOperands; ++i) {
       Type *Ty = I.getArgOperand(i)->getType();

llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll

Lines changed: 24 additions & 33 deletions
@@ -12,18 +12,15 @@ define <4 x i64> @test_mm256_abs_epi8(<4 x i64> noundef %a) local_unnamed_addr #
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
+; CHECK: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof !2
-; CHECK: call void @__msan_warning_with_origin_noreturn
-; CHECK: unreachable
-; CHECK: [[TMP7:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <32 x i8> [[TMP7]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK: ret <4 x i64> [[TMP6]]
 ;
 entry:
   %0 = bitcast <4 x i64> %a to <32 x i8>
@@ -37,18 +34,15 @@ define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
+; CHECK: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof !2
-; CHECK: call void @__msan_warning_with_origin_noreturn
-; CHECK: unreachable
-; CHECK: [[TMP7:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x i16> [[TMP7]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK: ret <4 x i64> [[TMP6]]
 ;
 entry:
   %0 = bitcast <4 x i64> %a to <16 x i16>
@@ -62,18 +56,15 @@ define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
+; CHECK: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof !2
-; CHECK: call void @__msan_warning_with_origin_noreturn
-; CHECK: unreachable
-; CHECK: [[TMP7:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP7]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK: ret <4 x i64> [[TMP6]]
 ;
 entry:
   %0 = bitcast <4 x i64> %a to <8 x i32>
