Skip to content

Commit 1afadbf

Browse files
authored
[LoongArch] Pre-commit tests for saturation sadd/ssub/uadd/usub intrinsics (#158176)
1 parent eeba6f4 commit 1afadbf

File tree

8 files changed

+540
-0
lines changed

8 files changed

+540
-0
lines changed
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64

;; Pre-commit tests for the llvm.sadd.sat.* (signed saturating add) intrinsics
;; on 256-bit LASX vectors. Checks are autogenerated; LA32 differs for i64
;; elements because the INT64_MIN splat comes from a constant pool (.LCPI3_0)
;; instead of xvbitrevi.d on the sign mask.

define <32 x i8> @xvsadd_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvsadd_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvadd.b $xr2, $xr0, $xr1
; CHECK-NEXT:    xvslt.b $xr0, $xr2, $xr0
; CHECK-NEXT:    xvslti.b $xr1, $xr1, 0
; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
; CHECK-NEXT:    xvsrai.b $xr1, $xr2, 7
; CHECK-NEXT:    xvbitrevi.b $xr1, $xr1, 7
; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
; CHECK-NEXT:    ret
  %ret = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
  ret <32 x i8> %ret
}

define <16 x i16> @xvsadd_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvsadd_h:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvadd.h $xr2, $xr0, $xr1
; CHECK-NEXT:    xvslt.h $xr0, $xr2, $xr0
; CHECK-NEXT:    xvslti.h $xr1, $xr1, 0
; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
; CHECK-NEXT:    xvsrai.h $xr1, $xr2, 15
; CHECK-NEXT:    xvbitrevi.h $xr1, $xr1, 15
; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
; CHECK-NEXT:    ret
  %ret = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
  ret <16 x i16> %ret
}

define <8 x i32> @xvsadd_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvsadd_w:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvadd.w $xr2, $xr0, $xr1
; CHECK-NEXT:    xvslt.w $xr0, $xr2, $xr0
; CHECK-NEXT:    xvslti.w $xr1, $xr1, 0
; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
; CHECK-NEXT:    xvsrai.w $xr1, $xr2, 31
; CHECK-NEXT:    xvbitrevi.w $xr1, $xr1, 31
; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
; CHECK-NEXT:    ret
  %ret = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
  ret <8 x i32> %ret
}

define <4 x i64> @xvsadd_d(<4 x i64> %a, <4 x i64> %b) {
; LA32-LABEL: xvsadd_d:
; LA32:       # %bb.0:
; LA32-NEXT:    xvadd.d $xr2, $xr0, $xr1
; LA32-NEXT:    xvslt.d $xr0, $xr2, $xr0
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
; LA32-NEXT:    xvld $xr3, $a0, %pc_lo12(.LCPI3_0)
; LA32-NEXT:    xvslti.d $xr1, $xr1, 0
; LA32-NEXT:    xvxor.v $xr0, $xr1, $xr0
; LA32-NEXT:    xvsrai.d $xr1, $xr2, 63
; LA32-NEXT:    xvxor.v $xr1, $xr1, $xr3
; LA32-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
; LA32-NEXT:    ret
;
; LA64-LABEL: xvsadd_d:
; LA64:       # %bb.0:
; LA64-NEXT:    xvadd.d $xr2, $xr0, $xr1
; LA64-NEXT:    xvslt.d $xr0, $xr2, $xr0
; LA64-NEXT:    xvslti.d $xr1, $xr1, 0
; LA64-NEXT:    xvxor.v $xr0, $xr1, $xr0
; LA64-NEXT:    xvsrai.d $xr1, $xr2, 63
; LA64-NEXT:    xvbitrevi.d $xr1, $xr1, 63
; LA64-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
; LA64-NEXT:    ret
  %ret = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
  ret <4 x i64> %ret
}

declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64

;; Pre-commit tests for the llvm.ssub.sat.* (signed saturating subtract)
;; intrinsics on 256-bit LASX vectors. Checks are autogenerated; LA32 differs
;; for i64 elements because the INT64_MIN splat is loaded from a constant pool
;; (.LCPI3_0) rather than formed with xvbitrevi.d.

define <32 x i8> @xvssub_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvssub_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr2, 0
; CHECK-NEXT:    xvslt.b $xr2, $xr2, $xr1
; CHECK-NEXT:    xvsub.b $xr1, $xr0, $xr1
; CHECK-NEXT:    xvslt.b $xr0, $xr1, $xr0
; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
; CHECK-NEXT:    xvsrai.b $xr2, $xr1, 7
; CHECK-NEXT:    xvbitrevi.b $xr2, $xr2, 7
; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT:    ret
  %ret = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
  ret <32 x i8> %ret
}

define <16 x i16> @xvssub_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvssub_h:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr2, 0
; CHECK-NEXT:    xvslt.h $xr2, $xr2, $xr1
; CHECK-NEXT:    xvsub.h $xr1, $xr0, $xr1
; CHECK-NEXT:    xvslt.h $xr0, $xr1, $xr0
; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
; CHECK-NEXT:    xvsrai.h $xr2, $xr1, 15
; CHECK-NEXT:    xvbitrevi.h $xr2, $xr2, 15
; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT:    ret
  %ret = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
  ret <16 x i16> %ret
}

define <8 x i32> @xvssub_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvssub_w:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr2, 0
; CHECK-NEXT:    xvslt.w $xr2, $xr2, $xr1
; CHECK-NEXT:    xvsub.w $xr1, $xr0, $xr1
; CHECK-NEXT:    xvslt.w $xr0, $xr1, $xr0
; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
; CHECK-NEXT:    xvsrai.w $xr2, $xr1, 31
; CHECK-NEXT:    xvbitrevi.w $xr2, $xr2, 31
; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT:    ret
  %ret = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
  ret <8 x i32> %ret
}

define <4 x i64> @xvssub_d(<4 x i64> %a, <4 x i64> %b) {
; LA32-LABEL: xvssub_d:
; LA32:       # %bb.0:
; LA32-NEXT:    xvrepli.b $xr2, 0
; LA32-NEXT:    xvslt.d $xr2, $xr2, $xr1
; LA32-NEXT:    xvsub.d $xr1, $xr0, $xr1
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
; LA32-NEXT:    xvld $xr3, $a0, %pc_lo12(.LCPI3_0)
; LA32-NEXT:    xvslt.d $xr0, $xr1, $xr0
; LA32-NEXT:    xvxor.v $xr0, $xr2, $xr0
; LA32-NEXT:    xvsrai.d $xr2, $xr1, 63
; LA32-NEXT:    xvxor.v $xr2, $xr2, $xr3
; LA32-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
; LA32-NEXT:    ret
;
; LA64-LABEL: xvssub_d:
; LA64:       # %bb.0:
; LA64-NEXT:    xvrepli.b $xr2, 0
; LA64-NEXT:    xvslt.d $xr2, $xr2, $xr1
; LA64-NEXT:    xvsub.d $xr1, $xr0, $xr1
; LA64-NEXT:    xvslt.d $xr0, $xr1, $xr0
; LA64-NEXT:    xvxor.v $xr0, $xr2, $xr0
; LA64-NEXT:    xvsrai.d $xr2, $xr1, 63
; LA64-NEXT:    xvbitrevi.d $xr2, $xr2, 63
; LA64-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
; LA64-NEXT:    ret
  %ret = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
  ret <4 x i64> %ret
}

declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s

;; Pre-commit tests for the llvm.uadd.sat.* (unsigned saturating add)
;; intrinsics on 256-bit LASX vectors. Codegen is identical on LA32 and LA64,
;; so a single CHECK prefix suffices. The byte case uses xvxori.b for the NOT;
;; wider elements build the all-ones mask with xvrepli.b -1 + xvxor.v.

define <32 x i8> @xvuadd_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvuadd_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvxori.b $xr2, $xr1, 255
; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr2
; CHECK-NEXT:    xvadd.b $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
  ret <32 x i8> %ret
}

define <16 x i16> @xvuadd_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvuadd_h:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr2, -1
; CHECK-NEXT:    xvxor.v $xr2, $xr1, $xr2
; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr2
; CHECK-NEXT:    xvadd.h $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
  ret <16 x i16> %ret
}

define <8 x i32> @xvuadd_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvuadd_w:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr2, -1
; CHECK-NEXT:    xvxor.v $xr2, $xr1, $xr2
; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr2
; CHECK-NEXT:    xvadd.w $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
  ret <8 x i32> %ret
}

define <4 x i64> @xvuadd_d(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: xvuadd_d:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr2, -1
; CHECK-NEXT:    xvxor.v $xr2, $xr1, $xr2
; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr2
; CHECK-NEXT:    xvadd.d $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
  ret <4 x i64> %ret
}

declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s

;; Pre-commit tests for the llvm.usub.sat.* (unsigned saturating subtract)
;; intrinsics on 256-bit LASX vectors. Codegen is identical on LA32 and LA64:
;; usub.sat(a, b) lowers to max(a, b) - b.

define <32 x i8> @xvusub_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvusub_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
  ret <32 x i8> %ret
}

define <16 x i16> @xvusub_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvusub_h:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
  ret <16 x i16> %ret
}

define <8 x i32> @xvusub_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvusub_w:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
  ret <8 x i32> %ret
}

define <4 x i64> @xvusub_d(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: xvusub_d:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
  %ret = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
  ret <4 x i64> %ret
}

declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64

;; Pre-commit tests for the llvm.sadd.sat.* (signed saturating add) intrinsics
;; on 128-bit LSX vectors. Checks are autogenerated; LA32 differs for i64
;; elements because the INT64_MIN splat is loaded from a constant pool
;; (.LCPI3_0) rather than formed with vbitrevi.d on the sign mask.

define <16 x i8> @vsadd_b(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vsadd_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vadd.b $vr2, $vr0, $vr1
; CHECK-NEXT:    vslt.b $vr0, $vr2, $vr0
; CHECK-NEXT:    vslti.b $vr1, $vr1, 0
; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vsrai.b $vr1, $vr2, 7
; CHECK-NEXT:    vbitrevi.b $vr1, $vr1, 7
; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
; CHECK-NEXT:    ret
  %ret = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %ret
}

define <8 x i16> @vsadd_h(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vsadd_h:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vadd.h $vr2, $vr0, $vr1
; CHECK-NEXT:    vslt.h $vr0, $vr2, $vr0
; CHECK-NEXT:    vslti.h $vr1, $vr1, 0
; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vsrai.h $vr1, $vr2, 15
; CHECK-NEXT:    vbitrevi.h $vr1, $vr1, 15
; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
; CHECK-NEXT:    ret
  %ret = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %ret
}

define <4 x i32> @vsadd_w(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vsadd_w:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vadd.w $vr2, $vr0, $vr1
; CHECK-NEXT:    vslt.w $vr0, $vr2, $vr0
; CHECK-NEXT:    vslti.w $vr1, $vr1, 0
; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vsrai.w $vr1, $vr2, 31
; CHECK-NEXT:    vbitrevi.w $vr1, $vr1, 31
; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
; CHECK-NEXT:    ret
  %ret = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %ret
}

define <2 x i64> @vsadd_d(<2 x i64> %a, <2 x i64> %b) {
; LA32-LABEL: vsadd_d:
; LA32:       # %bb.0:
; LA32-NEXT:    vadd.d $vr2, $vr0, $vr1
; LA32-NEXT:    vslt.d $vr0, $vr2, $vr0
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
; LA32-NEXT:    vld $vr3, $a0, %pc_lo12(.LCPI3_0)
; LA32-NEXT:    vslti.d $vr1, $vr1, 0
; LA32-NEXT:    vxor.v $vr0, $vr1, $vr0
; LA32-NEXT:    vsrai.d $vr1, $vr2, 63
; LA32-NEXT:    vxor.v $vr1, $vr1, $vr3
; LA32-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
; LA32-NEXT:    ret
;
; LA64-LABEL: vsadd_d:
; LA64:       # %bb.0:
; LA64-NEXT:    vadd.d $vr2, $vr0, $vr1
; LA64-NEXT:    vslt.d $vr0, $vr2, $vr0
; LA64-NEXT:    vslti.d $vr1, $vr1, 0
; LA64-NEXT:    vxor.v $vr0, $vr1, $vr0
; LA64-NEXT:    vsrai.d $vr1, $vr2, 63
; LA64-NEXT:    vbitrevi.d $vr1, $vr1, 63
; LA64-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
; LA64-NEXT:    ret
  %ret = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %ret
}

declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)

0 commit comments

Comments
 (0)