31 changes: 25 additions & 6 deletions llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
Original file line number Diff line number Diff line change
@@ -4569,16 +4569,35 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     SC.Done(&I);
   }

-  // Instrument abs intrinsic.
-  // handleUnknownIntrinsic can't handle it because of the last
-  // is_int_min_poison argument which does not match the result type.
+  // Instrument @llvm.abs intrinsic.
+  //
+  // e.g., i32 @llvm.abs.i32 (i32 <Src>, i1 <is_int_min_poison>)
+  //       <4 x i32> @llvm.abs.v4i32(<4 x i32> <Src>, i1 <is_int_min_poison>)
   void handleAbsIntrinsic(IntrinsicInst &I) {
+    assert(I.arg_size() == 2);
+    Value *Src = I.getArgOperand(0);
+    Value *IsIntMinPoison = I.getArgOperand(1);
+
     assert(I.getType()->isIntOrIntVectorTy());
-    assert(I.getArgOperand(0)->getType() == I.getType());

-    // FIXME: Handle is_int_min_poison.
+    assert(Src->getType() == I.getType());
+
+    assert(IsIntMinPoison->getType()->isIntegerTy());
+    assert(IsIntMinPoison->getType()->getIntegerBitWidth() == 1);
+
     IRBuilder<> IRB(&I);
-    setShadow(&I, getShadow(&I, 0));
+    Value *SrcShadow = getShadow(Src);
+
+    APInt MinVal =
+        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
+    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
+    Value *SrcIsMin = IRB.CreateICmp(CmpInst::ICMP_EQ, Src, MinValVec);
+
+    Value *PoisonedShadow = getPoisonedShadow(Src);
+    Value *PoisonedIfIntMinShadow = IRB.CreateSelect(SrcIsMin, PoisonedShadow, SrcShadow);
+    Value *Shadow = IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);
+
+    setShadow(&I, Shadow);
     setOrigin(&I, getOrigin(&I, 0));
   }

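For context, a rough sketch of the shadow propagation the updated handler produces for a scalar call. The value names (%x, %x_shadow, %is_min, and so on) are made up for illustration; MSan numbers its temporaries differently, as the updated test checks further down show.

; Hypothetical instrumentation of  %res = call i32 @llvm.abs.i32(i32 %x, i1 true),
; where %x_shadow is the shadow MSan has already computed for %x.
%is_min     = icmp eq i32 %x, -2147483648
%sh_poison  = select i1 %is_min, i32 -1, i32 %x_shadow
%res_shadow = select i1 true, i32 %sh_poison, i32 %x_shadow
%res        = call i32 @llvm.abs.i32(i32 %x, i1 true)

When is_int_min_poison is false, as in all of the updated tests below, the outer select always yields the plain source shadow, so the observable shadow behavior for those cases is unchanged.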
Original file line number Diff line number Diff line change
@@ -11436,8 +11436,11 @@ define <16 x i32>@test_int_x86_avx512_pabs_d_512(<16 x i32> %x0, <16 x i32> %x1)
 ; CHECK-LABEL: @test_int_x86_avx512_pabs_d_512(
 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[X0:%.*]], i1 false)
-; CHECK-NEXT: store <16 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <16 x i32> [[X0:%.*]], splat (i32 -2147483648)
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> splat (i32 -1), <16 x i32> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = select i1 false, <16 x i32> [[TMP3]], <16 x i32> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[X0]], i1 false)
+; CHECK-NEXT: store <16 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret <16 x i32> [[TMP2]]
 ;
 %res = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 -1)
@@ -11451,12 +11454,15 @@ define <16 x i32>@test_int_x86_avx512_mask_pabs_d_512(<16 x i32> %x0, <16 x i32>
 ; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[X0:%.*]], i1 false)
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <16 x i32> [[X0:%.*]], splat (i32 -2147483648)
+; CHECK-NEXT: [[TMP13:%.*]] = select <16 x i1> [[TMP12]], <16 x i32> splat (i32 -1), <16 x i32> [[TMP1]]
+; CHECK-NEXT: [[TMP14:%.*]] = select i1 false, <16 x i32> [[TMP13]], <16 x i32> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[X0]], i1 false)
 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[TMP6]], <16 x i32> [[TMP1]], <16 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[TMP6]], <16 x i32> [[TMP14]], <16 x i32> [[TMP3]]
 ; CHECK-NEXT: [[TMP8:%.*]] = xor <16 x i32> [[TMP4]], [[X1:%.*]]
-; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], [[TMP1]]
+; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], [[TMP14]]
 ; CHECK-NEXT: [[TMP10:%.*]] = or <16 x i32> [[TMP9]], [[TMP3]]
 ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP10]], <16 x i32> [[TMP7]]
 ; CHECK-NEXT: [[TMP11:%.*]] = select <16 x i1> [[TMP6]], <16 x i32> [[TMP4]], <16 x i32> [[X1]]
@@ -11473,8 +11479,11 @@ define <8 x i64>@test_int_x86_avx512_pabs_q_512(<8 x i64> %x0, <8 x i64> %x1, i8
 ; CHECK-LABEL: @test_int_x86_avx512_pabs_q_512(
 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i64> @llvm.abs.v8i64(<8 x i64> [[X0:%.*]], i1 false)
-; CHECK-NEXT: store <8 x i64> [[TMP1]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <8 x i64> [[X0:%.*]], splat (i64 -9223372036854775808)
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP5]], <8 x i64> splat (i64 -1), <8 x i64> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = select i1 false, <8 x i64> [[TMP3]], <8 x i64> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i64> @llvm.abs.v8i64(<8 x i64> [[X0]], i1 false)
+; CHECK-NEXT: store <8 x i64> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret <8 x i64> [[TMP2]]
 ;
 %res = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 -1)
@@ -11488,12 +11497,15 @@ define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x
 ; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.abs.v8i64(<8 x i64> [[X0:%.*]], i1 false)
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <8 x i64> [[X0:%.*]], splat (i64 -9223372036854775808)
+; CHECK-NEXT: [[TMP13:%.*]] = select <8 x i1> [[TMP12]], <8 x i64> splat (i64 -1), <8 x i64> [[TMP1]]
+; CHECK-NEXT: [[TMP14:%.*]] = select i1 false, <8 x i64> [[TMP13]], <8 x i64> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.abs.v8i64(<8 x i64> [[X0]], i1 false)
 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[X2:%.*]] to <8 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP6]], <8 x i64> [[TMP1]], <8 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP6]], <8 x i64> [[TMP14]], <8 x i64> [[TMP3]]
 ; CHECK-NEXT: [[TMP8:%.*]] = xor <8 x i64> [[TMP4]], [[X1:%.*]]
-; CHECK-NEXT: [[TMP9:%.*]] = or <8 x i64> [[TMP8]], [[TMP1]]
+; CHECK-NEXT: [[TMP9:%.*]] = or <8 x i64> [[TMP8]], [[TMP14]]
 ; CHECK-NEXT: [[TMP10:%.*]] = or <8 x i64> [[TMP9]], [[TMP3]]
 ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP5]], <8 x i64> [[TMP10]], <8 x i64> [[TMP7]]
 ; CHECK-NEXT: [[TMP11:%.*]] = select <8 x i1> [[TMP6]], <8 x i64> [[TMP4]], <8 x i64> [[X1]]
Original file line number Diff line number Diff line change
@@ -4443,8 +4443,11 @@ define <32 x i16> @test_int_x86_avx512_pabs_w_512(<32 x i16> %x0, <32 x i16> %x1
 ; CHECK-LABEL: @test_int_x86_avx512_pabs_w_512(
 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> [[X0:%.*]], i1 false)
-; CHECK-NEXT: store <32 x i16> [[TMP1]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <32 x i16> [[X0:%.*]], splat (i16 -32768)
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP5]], <32 x i16> splat (i16 -1), <32 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = select i1 false, <32 x i16> [[TMP3]], <32 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> [[X0]], i1 false)
+; CHECK-NEXT: store <32 x i16> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret <32 x i16> [[TMP2]]
 ;
 %res = call <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 -1)
@@ -4457,12 +4460,15 @@ define <32 x i16> @test_int_x86_avx512_mask_pabs_w_512(<32 x i16> %x0, <32 x i16
 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
 ; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> [[X0:%.*]], i1 false)
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <32 x i16> [[X0:%.*]], splat (i16 -32768)
+; CHECK-NEXT: [[TMP13:%.*]] = select <32 x i1> [[TMP12]], <32 x i16> splat (i16 -1), <32 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP14:%.*]] = select i1 false, <32 x i16> [[TMP13]], <32 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> [[X0]], i1 false)
 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP2]] to <32 x i1>
 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = select <32 x i1> [[TMP6]], <32 x i16> [[TMP1]], <32 x i16> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = select <32 x i1> [[TMP6]], <32 x i16> [[TMP14]], <32 x i16> [[TMP3]]
 ; CHECK-NEXT: [[TMP8:%.*]] = xor <32 x i16> [[TMP4]], [[X1:%.*]]
-; CHECK-NEXT: [[TMP9:%.*]] = or <32 x i16> [[TMP8]], [[TMP1]]
+; CHECK-NEXT: [[TMP9:%.*]] = or <32 x i16> [[TMP8]], [[TMP14]]
 ; CHECK-NEXT: [[TMP10:%.*]] = or <32 x i16> [[TMP9]], [[TMP3]]
 ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <32 x i1> [[TMP5]], <32 x i16> [[TMP10]], <32 x i16> [[TMP7]]
 ; CHECK-NEXT: [[TMP11:%.*]] = select <32 x i1> [[TMP6]], <32 x i16> [[TMP4]], <32 x i16> [[X1]]
@@ -4479,8 +4485,11 @@ define <64 x i8> @test_int_x86_avx512_pabs_b_512(<64 x i8> %x0, <64 x i8> %x1) n
 ; CHECK-LABEL: @test_int_x86_avx512_pabs_b_512(
 ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> [[X0:%.*]], i1 false)
-; CHECK-NEXT: store <64 x i8> [[TMP1]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <64 x i8> [[X0:%.*]], splat (i8 -128)
+; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP5]], <64 x i8> splat (i8 -1), <64 x i8> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = select i1 false, <64 x i8> [[TMP3]], <64 x i8> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> [[X0]], i1 false)
+; CHECK-NEXT: store <64 x i8> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret <64 x i8> [[TMP2]]
 ;
 %res = call <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 -1)
@@ -4493,12 +4502,15 @@ define <64 x i8> @test_int_x86_avx512_mask_pabs_b_512(<64 x i8> %x0, <64 x i8> %x
 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
 ; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> [[X0:%.*]], i1 false)
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <64 x i8> [[X0:%.*]], splat (i8 -128)
+; CHECK-NEXT: [[TMP13:%.*]] = select <64 x i1> [[TMP12]], <64 x i8> splat (i8 -1), <64 x i8> [[TMP1]]
+; CHECK-NEXT: [[TMP14:%.*]] = select i1 false, <64 x i8> [[TMP13]], <64 x i8> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> [[X0]], i1 false)
 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP2]] to <64 x i1>
 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[X2:%.*]] to <64 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP1]], <64 x i8> [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP14]], <64 x i8> [[TMP3]]
 ; CHECK-NEXT: [[TMP8:%.*]] = xor <64 x i8> [[TMP4]], [[X1:%.*]]
-; CHECK-NEXT: [[TMP9:%.*]] = or <64 x i8> [[TMP8]], [[TMP1]]
+; CHECK-NEXT: [[TMP9:%.*]] = or <64 x i8> [[TMP8]], [[TMP14]]
 ; CHECK-NEXT: [[TMP10:%.*]] = or <64 x i8> [[TMP9]], [[TMP3]]
 ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <64 x i1> [[TMP5]], <64 x i8> [[TMP10]], <64 x i8> [[TMP7]]
 ; CHECK-NEXT: [[TMP11:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP4]], <64 x i8> [[X1]]
87 changes: 48 additions & 39 deletions llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll
Original file line number Diff line number Diff line change
@@ -6,19 +6,22 @@ target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"

 define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a) local_unnamed_addr #0 {
-; CHECK-LABEL: @test_mm256_abs_epi8(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP6]]
+; ORIGIN-LABEL: @test_mm256_abs_epi8(
+; ORIGIN-NEXT: entry:
+; ORIGIN-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; ORIGIN-NEXT: call void @llvm.donothing()
+; ORIGIN-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
+; ORIGIN-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
+; ORIGIN-NEXT: [[TMP4:%.*]] = icmp eq <32 x i8> [[TMP3]], splat (i8 -128)
+; ORIGIN-NEXT: [[TMP5:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> splat (i8 -1), <32 x i8> [[TMP2]]
+; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 false, <32 x i8> [[TMP5]], <32 x i8> [[TMP2]]
+; ORIGIN-NEXT: [[TMP7:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
+; ORIGIN-NEXT: [[TMP8:%.*]] = bitcast <32 x i8> [[TMP6]] to <4 x i64>
+; ORIGIN-NEXT: [[TMP9:%.*]] = bitcast <32 x i8> [[TMP7]] to <4 x i64>
+; ORIGIN-NEXT: store <4 x i64> [[TMP8]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
+; ORIGIN-NEXT: ret <4 x i64> [[TMP9]]
 ;
 entry:
 %0 = bitcast <4 x i64> %a to <32 x i8>
Expand All @@ -28,19 +31,22 @@ entry:
 }

 define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
-; CHECK-LABEL: @test_mm256_abs_epi16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
-; CHECK-NEXT: [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP6]]
+; ORIGIN-LABEL: @test_mm256_abs_epi16(
+; ORIGIN-NEXT: entry:
+; ORIGIN-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; ORIGIN-NEXT: call void @llvm.donothing()
+; ORIGIN-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
+; ORIGIN-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
+; ORIGIN-NEXT: [[TMP4:%.*]] = icmp eq <16 x i16> [[TMP3]], splat (i16 -32768)
+; ORIGIN-NEXT: [[TMP5:%.*]] = select <16 x i1> [[TMP4]], <16 x i16> splat (i16 -1), <16 x i16> [[TMP2]]
+; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 false, <16 x i16> [[TMP5]], <16 x i16> [[TMP2]]
+; ORIGIN-NEXT: [[TMP7:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
+; ORIGIN-NEXT: [[TMP8:%.*]] = bitcast <16 x i16> [[TMP6]] to <4 x i64>
+; ORIGIN-NEXT: [[TMP9:%.*]] = bitcast <16 x i16> [[TMP7]] to <4 x i64>
+; ORIGIN-NEXT: store <4 x i64> [[TMP8]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
+; ORIGIN-NEXT: ret <4 x i64> [[TMP9]]
 ;
 entry:
 %0 = bitcast <4 x i64> %a to <16 x i16>
Expand All @@ -50,19 +56,22 @@ entry:
 }

 define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
-; CHECK-LABEL: @test_mm256_abs_epi32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP6]]
+; ORIGIN-LABEL: @test_mm256_abs_epi32(
+; ORIGIN-NEXT: entry:
+; ORIGIN-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; ORIGIN-NEXT: call void @llvm.donothing()
+; ORIGIN-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
+; ORIGIN-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
+; ORIGIN-NEXT: [[TMP4:%.*]] = icmp eq <8 x i32> [[TMP3]], splat (i32 -2147483648)
+; ORIGIN-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP4]], <8 x i32> splat (i32 -1), <8 x i32> [[TMP2]]
+; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 false, <8 x i32> [[TMP5]], <8 x i32> [[TMP2]]
+; ORIGIN-NEXT: [[TMP7:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
+; ORIGIN-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP6]] to <4 x i64>
+; ORIGIN-NEXT: [[TMP9:%.*]] = bitcast <8 x i32> [[TMP7]] to <4 x i64>
+; ORIGIN-NEXT: store <4 x i64> [[TMP8]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
+; ORIGIN-NEXT: ret <4 x i64> [[TMP9]]
 ;
 entry:
 %0 = bitcast <4 x i64> %a to <8 x i32>
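To see the new instrumentation outside of these pre-generated checks, a minimal module along the following lines can be fed through opt. The function name, RUN line, and datalayout are illustrative assumptions and are not part of this patch; check lines could then be generated with llvm/utils/update_test_checks.py.

; RUN: opt -passes=msan -S %s -o -
; Illustrative reproducer (not part of this PR): exercises is_int_min_poison = true.
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)

define <4 x i32> @abs_int_min_poison(<4 x i32> %v) sanitize_memory {
entry:
  ; MSan is expected to emit an icmp of %v against splat (i32 -2147483648)
  ; and select an all-ones shadow for the lanes that compare equal.
  %r = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %v, i1 true)
  ret <4 x i32> %r
}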