You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
define <vscale x 2 x i64> @uxtb_z_64(<vscale x 2 x i64> %0) #0 {
7
7
; CHECK-LABEL: define <vscale x 2 x i64> @uxtb_z_64(
8
8
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
9
-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 255)
9
+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 255))
10
10
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
11
11
;
12
12
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -16,7 +16,7 @@ define <vscale x 2 x i64> @uxtb_z_64(<vscale x 2 x i64> %0) #0 {
16
16
define <vscale x 2 x i64> @uxtb_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) #0 {
17
17
; CHECK-LABEL: define <vscale x 2 x i64> @uxtb_m_64(
18
18
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
19
-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 255)
19
+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 255))
20
20
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
21
21
;
22
22
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> %1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -26,7 +26,8 @@ define <vscale x 2 x i64> @uxtb_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %
26
26
define <vscale x 2 x i64> @uxtb_x_64(<vscale x 16 x i1> %0, <vscale x 2 x i64> %1) #0 {
27
27
; CHECK-LABEL: define <vscale x 2 x i64> @uxtb_x_64(
28
28
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
29
-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 2 x i64> [[TMP1]], splat (i64 255)
29
+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
30
+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> splat (i64 255))
30
31
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
31
32
;
32
33
%3 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
@@ -61,7 +62,7 @@ define <vscale x 2 x i64> @uxtb_m_64_no_ptrue(<vscale x 16 x i1> %0, <vscale x 2
61
62
define <vscale x 4 x i32> @uxtb_z_32(<vscale x 4 x i32> %0) #0 {
62
63
; CHECK-LABEL: define <vscale x 4 x i32> @uxtb_z_32(
63
64
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]]) #[[ATTR0]] {
64
-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 255)
65
+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 255))
65
66
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
66
67
;
67
68
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -71,7 +72,7 @@ define <vscale x 4 x i32> @uxtb_z_32(<vscale x 4 x i32> %0) #0 {
71
72
define <vscale x 4 x i32> @uxtb_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1) #0 {
72
73
; CHECK-LABEL: define <vscale x 4 x i32> @uxtb_m_32(
73
74
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
74
-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 255)
75
+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 255))
75
76
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
76
77
;
77
78
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> %1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -81,7 +82,8 @@ define <vscale x 4 x i32> @uxtb_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %
81
82
define <vscale x 4 x i32> @uxtb_x_32(<vscale x 16 x i1> %0, <vscale x 4 x i32> %1) #0 {
82
83
; CHECK-LABEL: define <vscale x 4 x i32> @uxtb_x_32(
83
84
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
84
-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 4 x i32> [[TMP1]], splat (i32 255)
85
+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
86
+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> splat (i32 255))
85
87
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
86
88
;
87
89
%3 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %0)
@@ -116,7 +118,7 @@ define <vscale x 4 x i32> @uxtb_m_32_no_ptrue(<vscale x 16 x i1> %0, <vscale x 4
116
118
define <vscale x 8 x i16> @uxtb_z_16(<vscale x 8 x i16> %0) #0 {
117
119
; CHECK-LABEL: define <vscale x 8 x i16> @uxtb_z_16(
118
120
; CHECK-SAME: <vscale x 8 x i16> [[TMP0:%.*]]) #[[ATTR0]] {
119
-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 8 x i16> [[TMP0]], splat (i16 255)
121
+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> splat (i16 255))
120
122
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
121
123
;
122
124
%2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> %0)
@@ -126,7 +128,7 @@ define <vscale x 8 x i16> @uxtb_z_16(<vscale x 8 x i16> %0) #0 {
126
128
define <vscale x 8 x i16> @uxtb_m_16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1) #0 {
127
129
; CHECK-LABEL: define <vscale x 8 x i16> @uxtb_m_16(
128
130
; CHECK-SAME: <vscale x 8 x i16> [[TMP0:%.*]], <vscale x 8 x i16> [[TMP1:%.*]]) #[[ATTR0]] {
129
-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 8 x i16> [[TMP0]], splat (i16 255)
131
+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> splat (i16 255))
130
132
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP3]]
131
133
;
132
134
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> %1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> %0)
@@ -136,7 +138,8 @@ define <vscale x 8 x i16> @uxtb_m_16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %
136
138
define <vscale x 8 x i16> @uxtb_x_16(<vscale x 16 x i1> %0, <vscale x 8 x i16> %1) #0 {
137
139
; CHECK-LABEL: define <vscale x 8 x i16> @uxtb_x_16(
138
140
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 8 x i16> [[TMP1:%.*]]) #[[ATTR0]] {
139
-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 8 x i16> [[TMP1]], splat (i16 255)
141
+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP0]])
142
+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> [[TMP3]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> splat (i16 255))
140
143
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
141
144
;
142
145
%3 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %0)
@@ -171,7 +174,7 @@ define <vscale x 8 x i16> @uxtb_m_16_no_ptrue(<vscale x 16 x i1> %0, <vscale x 8
171
174
define <vscale x 2 x i64> @uxth_z_64(<vscale x 2 x i64> %0) #0 {
172
175
; CHECK-LABEL: define <vscale x 2 x i64> @uxth_z_64(
173
176
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0]] {
174
-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 65535)
177
+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 65535))
175
178
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
176
179
;
177
180
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -181,7 +184,7 @@ define <vscale x 2 x i64> @uxth_z_64(<vscale x 2 x i64> %0) #0 {
181
184
define <vscale x 2 x i64> @uxth_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) #0 {
182
185
; CHECK-LABEL: define <vscale x 2 x i64> @uxth_m_64(
183
186
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
184
-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 65535)
187
+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 65535))
185
188
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
186
189
;
187
190
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64> %1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -191,7 +194,8 @@ define <vscale x 2 x i64> @uxth_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %
191
194
define <vscale x 2 x i64> @uxth_x_64(<vscale x 16 x i1> %0, <vscale x 2 x i64> %1) #0 {
192
195
; CHECK-LABEL: define <vscale x 2 x i64> @uxth_x_64(
193
196
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
194
-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 2 x i64> [[TMP1]], splat (i64 65535)
197
+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
198
+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> splat (i64 65535))
195
199
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
196
200
;
197
201
%3 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
@@ -226,7 +230,7 @@ define <vscale x 2 x i64> @uxth_m_64_no_ptrue(<vscale x 16 x i1> %0, <vscale x 2
226
230
define <vscale x 4 x i32> @uxth_z_32(<vscale x 4 x i32> %0) #0 {
227
231
; CHECK-LABEL: define <vscale x 4 x i32> @uxth_z_32(
228
232
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]]) #[[ATTR0]] {
229
-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 65535)
233
+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 65535))
230
234
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
231
235
;
232
236
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -236,7 +240,7 @@ define <vscale x 4 x i32> @uxth_z_32(<vscale x 4 x i32> %0) #0 {
236
240
define <vscale x 4 x i32> @uxth_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1) #0 {
237
241
; CHECK-LABEL: define <vscale x 4 x i32> @uxth_m_32(
238
242
; CHECK-SAME: <vscale x 4 x i32> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
239
-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 65535)
243
+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> splat (i32 65535))
240
244
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
241
245
;
242
246
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32> %1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %0)
@@ -246,7 +250,8 @@ define <vscale x 4 x i32> @uxth_m_32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %
246
250
define <vscale x 4 x i32> @uxth_x_32(<vscale x 16 x i1> %0, <vscale x 4 x i32> %1) #0 {
247
251
; CHECK-LABEL: define <vscale x 4 x i32> @uxth_x_32(
248
252
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 4 x i32> [[TMP1:%.*]]) #[[ATTR0]] {
249
-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 4 x i32> [[TMP1]], splat (i32 65535)
253
+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
254
+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> splat (i32 65535))
250
255
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
251
256
;
252
257
%3 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %0)
@@ -281,7 +286,7 @@ define <vscale x 4 x i32> @uxth_m_32_no_ptrue(<vscale x 16 x i1> %0, <vscale x 4
281
286
define <vscale x 2 x i64> @uxtw_z_64(<vscale x 2 x i64> %0) #0 {
282
287
; CHECK-LABEL: define <vscale x 2 x i64> @uxtw_z_64(
283
288
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0]] {
284
-
; CHECK-NEXT: [[TMP2:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 4294967295)
289
+
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 4294967295))
285
290
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
286
291
;
287
292
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -291,7 +296,7 @@ define <vscale x 2 x i64> @uxtw_z_64(<vscale x 2 x i64> %0) #0 {
291
296
define <vscale x 2 x i64> @uxtw_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) #0 {
292
297
; CHECK-LABEL: define <vscale x 2 x i64> @uxtw_m_64(
293
298
; CHECK-SAME: <vscale x 2 x i64> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
294
-
; CHECK-NEXT: [[TMP3:%.*]] = and <vscale x 2 x i64> [[TMP0]], splat (i64 4294967295)
299
+
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> splat (i64 4294967295))
295
300
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
296
301
;
297
302
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> %1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> %0)
@@ -301,7 +306,8 @@ define <vscale x 2 x i64> @uxtw_m_64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %
301
306
define <vscale x 2 x i64> @uxtw_x_64(<vscale x 16 x i1> %0, <vscale x 2 x i64> %1) #0 {
302
307
; CHECK-LABEL: define <vscale x 2 x i64> @uxtw_x_64(
303
308
; CHECK-SAME: <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 2 x i64> [[TMP1:%.*]]) #[[ATTR0]] {
304
-
; CHECK-NEXT: [[TMP4:%.*]] = and <vscale x 2 x i64> [[TMP1]], splat (i64 4294967295)
309
+
; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
310
+
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> splat (i64 4294967295))
305
311
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
306
312
;
307
313
%3 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
0 commit comments