Skip to content

Commit 89ce3d8

Browse files
committed
Emit Intrinsic::aarch64_sve_and_u rather than a plain LLVM `and` instruction.
1 parent a381043 commit 89ce3d8

File tree

2 files changed

+27
-19
lines changed

2 files changed

+27
-19
lines changed

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2654,7 +2654,9 @@ static std::optional<Instruction *> instCombineSVEUxt(InstCombiner &IC,
26542654
auto *Mask = ConstantVector::getSplat(
26552655
Ty->getElementCount(),
26562656
ConstantInt::get(Ty->getElementType(), MaskValue));
2657-
return IC.replaceInstUsesWith(II, IC.Builder.CreateAnd(Op, Mask));
2657+
auto *And = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_and_u, {Ty},
2658+
{Pg, Op, Mask});
2659+
return IC.replaceInstUsesWith(II, And);
26582660
}
26592661

26602662
return std::nullopt;

llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-uxt.ll

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ target triple = "aarch64-unknown-linux-gnu"
66
define <vscale x 2 x i64> @uxtb_z_64(<vscale x 2 x i64> %0) #0 {
77
; CHECK-LABEL: define <vscale x 2 x i64> @uxtb_z_64(
88
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
9-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 255)
9+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 255))
1010
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
1111
;
1212
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -16,7 +16,7 @@ define <vscale x 2 x i64> @uxtb_z_64(<vscale x 2 x i64> %0) #0 {
1616
define <vscale x 2 x i64> @uxtb_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) #0 {
1717
; CHECK-LABEL: define <vscale x 2 x i64> @uxtb_m_64(
1818
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
19-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 255)
19+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 255))
2020
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
2121
;
2222
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> %1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -26,7 +26,8 @@ define <vscale x 2 x i64> @uxtb_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %
2626
define <vscale x 2 x i64> @uxtb_x_64(<vscale x 16 x i1> %0, <vscale x 2 x i64> %1) #0 {
2727
; CHECK-LABEL: define <vscale x 2 x i64> @uxtb_x_64(
2828
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
29-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 2 x i64> [[TMP1]], splat (i64 255)
29+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
30+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> splat (i64 255))
3031
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
3132
;
3233
%3 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
@@ -61,7 +62,7 @@ define <vscale x 2 x i64> @uxtb_m_64_no_ptrue(<vscale x 16 x i1> %0, <vscale x 2
6162
define <vscale x 4 x i32> @uxtb_z_32(<vscale x 4 x i32> %0) #0 {
6263
; CHECK-LABEL: define <vscale x 4 x i32> @uxtb_z_32(
6364
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]]) #[[ATTR0]] {
64-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 255)
65+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 255))
6566
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
6667
;
6768
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -71,7 +72,7 @@ define <vscale x 4 x i32> @uxtb_z_32(<vscale x 4 x i32> %0) #0 {
7172
define <vscale x 4 x i32> @uxtb_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1) #0 {
7273
; CHECK-LABEL: define <vscale x 4 x i32> @uxtb_m_32(
7374
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
74-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 255)
75+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 255))
7576
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
7677
;
7778
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> %1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -81,7 +82,8 @@ define <vscale x 4 x i32> @uxtb_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %
8182
define <vscale x 4 x i32> @uxtb_x_32(<vscale x 16 x i1> %0, <vscale x 4 x i32> %1) #0 {
8283
; CHECK-LABEL: define <vscale x 4 x i32> @uxtb_x_32(
8384
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
84-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 4 x i32> [[TMP1]], splat (i32 255)
85+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
86+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> splat (i32 255))
8587
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
8688
;
8789
%3 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %0)
@@ -116,7 +118,7 @@ define <vscale x 4 x i32> @uxtb_m_32_no_ptrue(<vscale x 16 x i1> %0, <vscale x 4
116118
define <vscale x 8 x i16> @uxtb_z_16(<vscale x 8 x i16> %0) #0 {
117119
; CHECK-LABEL: define <vscale x 8 x i16> @uxtb_z_16(
118120
; CHECK-SAME: <vscale x 8 x i16> [[TMP0:%.*]]) #[[ATTR0]] {
119-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 8 x i16> [[TMP0]], splat (i16 255)
121+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> splat (i16 255))
120122
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
121123
;
122124
%2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> %0)
@@ -126,7 +128,7 @@ define <vscale x 8 x i16> @uxtb_z_16(<vscale x 8 x i16> %0) #0 {
126128
define <vscale x 8 x i16> @uxtb_m_16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1) #0 {
127129
; CHECK-LABEL: define <vscale x 8 x i16> @uxtb_m_16(
128130
; CHECK-SAME: <vscale x 8 x i16> [[TMP0:%.*]], <vscale x 8 x i16> [[TMP1:%.*]]) #[[ATTR0]] {
129-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 8 x i16> [[TMP0]], splat (i16 255)
131+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> splat (i16 255))
130132
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP3]]
131133
;
132134
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> %1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> %0)
@@ -136,7 +138,8 @@ define <vscale x 8 x i16> @uxtb_m_16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %
136138
define <vscale x 8 x i16> @uxtb_x_16(<vscale x 16 x i1> %0, <vscale x 8 x i16> %1) #0 {
137139
; CHECK-LABEL: define <vscale x 8 x i16> @uxtb_x_16(
138140
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 8 x i16> [[TMP1:%.*]]) #[[ATTR0]] {
139-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 8 x i16> [[TMP1]], splat (i16 255)
141+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP0]])
142+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> [[TMP3]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> splat (i16 255))
140143
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
141144
;
142145
%3 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %0)
@@ -171,7 +174,7 @@ define <vscale x 8 x i16> @uxtb_m_16_no_ptrue(<vscale x 16 x i1> %0, <vscale x 8
171174
define <vscale x 2 x i64> @uxth_z_64(<vscale x 2 x i64> %0) #0 {
172175
; CHECK-LABEL: define <vscale x 2 x i64> @uxth_z_64(
173176
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0]] {
174-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 65535)
177+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 65535))
175178
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
176179
;
177180
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -181,7 +184,7 @@ define <vscale x 2 x i64> @uxth_z_64(<vscale x 2 x i64> %0) #0 {
181184
define <vscale x 2 x i64> @uxth_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) #0 {
182185
; CHECK-LABEL: define <vscale x 2 x i64> @uxth_m_64(
183186
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
184-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 65535)
187+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 65535))
185188
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
186189
;
187190
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64> %1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -191,7 +194,8 @@ define <vscale x 2 x i64> @uxth_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %
191194
define <vscale x 2 x i64> @uxth_x_64(<vscale x 16 x i1> %0, <vscale x 2 x i64> %1) #0 {
192195
; CHECK-LABEL: define <vscale x 2 x i64> @uxth_x_64(
193196
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
194-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 2 x i64> [[TMP1]], splat (i64 65535)
197+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
198+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> splat (i64 65535))
195199
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
196200
;
197201
%3 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
@@ -226,7 +230,7 @@ define <vscale x 2 x i64> @uxth_m_64_no_ptrue(<vscale x 16 x i1> %0, <vscale x 2
226230
define <vscale x 4 x i32> @uxth_z_32(<vscale x 4 x i32> %0) #0 {
227231
; CHECK-LABEL: define <vscale x 4 x i32> @uxth_z_32(
228232
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]]) #[[ATTR0]] {
229-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 65535)
233+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 65535))
230234
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
231235
;
232236
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -236,7 +240,7 @@ define <vscale x 4 x i32> @uxth_z_32(<vscale x 4 x i32> %0) #0 {
236240
define <vscale x 4 x i32> @uxth_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1) #0 {
237241
; CHECK-LABEL: define <vscale x 4 x i32> @uxth_m_32(
238242
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
239-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 65535)
243+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 65535))
240244
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
241245
;
242246
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32> %1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -246,7 +250,8 @@ define <vscale x 4 x i32> @uxth_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %
246250
define <vscale x 4 x i32> @uxth_x_32(<vscale x 16 x i1> %0, <vscale x 4 x i32> %1) #0 {
247251
; CHECK-LABEL: define <vscale x 4 x i32> @uxth_x_32(
248252
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
249-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 4 x i32> [[TMP1]], splat (i32 65535)
253+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
254+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> splat (i32 65535))
250255
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
251256
;
252257
%3 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %0)
@@ -281,7 +286,7 @@ define <vscale x 4 x i32> @uxth_m_32_no_ptrue(<vscale x 16 x i1> %0, <vscale x 4
281286
define <vscale x 2 x i64> @uxtw_z_64(<vscale x 2 x i64> %0) #0 {
282287
; CHECK-LABEL: define <vscale x 2 x i64> @uxtw_z_64(
283288
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0]] {
284-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 4294967295)
289+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 4294967295))
285290
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
286291
;
287292
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -291,7 +296,7 @@ define <vscale x 2 x i64> @uxtw_z_64(<vscale x 2 x i64> %0) #0 {
291296
define <vscale x 2 x i64> @uxtw_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) #0 {
292297
; CHECK-LABEL: define <vscale x 2 x i64> @uxtw_m_64(
293298
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
294-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 4294967295)
299+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 4294967295))
295300
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
296301
;
297302
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> %1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -301,7 +306,8 @@ define <vscale x 2 x i64> @uxtw_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %
301306
define <vscale x 2 x i64> @uxtw_x_64(<vscale x 16 x i1> %0, <vscale x 2 x i64> %1) #0 {
302307
; CHECK-LABEL: define <vscale x 2 x i64> @uxtw_x_64(
303308
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
304-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 2 x i64> [[TMP1]], splat (i64 4294967295)
309+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
310+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> splat (i64 4294967295))
305311
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
306312
;
307313
%3 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)

0 commit comments

Comments
 (0)