+; ALL-CONVERT-NEXT: [[NEWM2:%.+]] = and <8 x i1> [[EVLM2]], %m
+; ALL-CONVERT-NEXT: %r11 = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> [[NEWM2]], <8 x i32> %i0, <8 x i32> %i1, i32 8)
+; ALL-CONVERT-NEXT: ret void

+; ALL-CONVERT: define void @test_vp_int_vscale(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1, <vscale x 4 x i32> %i2, <vscale x 4 x i32> %f3, <vscale x 4 x i1> %m, i32 %n) {
+; ALL-CONVERT: %{{.*}} = add <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = sub <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = mul <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: [[EVLM:%.+]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 %n)
+; ALL-CONVERT: [[NEWM:%.+]] = and <vscale x 4 x i1> [[EVLM]], %m
+; ALL-CONVERT: [[SELONE:%.+]] = select <vscale x 4 x i1> [[NEWM]], <vscale x 4 x i32> %i1, <vscale x 4 x i32> splat (i32 1)
+; ALL-CONVERT: %{{.*}} = sdiv <vscale x 4 x i32> %i0, [[SELONE]]
+; ALL-CONVERT: [[EVLM2:%.+]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 %n)
+; ALL-CONVERT: [[NEWM2:%.+]] = and <vscale x 4 x i1> [[EVLM2]], %m
+; ALL-CONVERT: [[SELONE2:%.+]] = select <vscale x 4 x i1> [[NEWM2]], <vscale x 4 x i32> %i1, <vscale x 4 x i32> splat (i32 1)
+; ALL-CONVERT: %{{.*}} = srem <vscale x 4 x i32> %i0, [[SELONE2]]
+; ALL-CONVERT: [[EVLM3:%.+]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 %n)
+; ALL-CONVERT: [[NEWM3:%.+]] = and <vscale x 4 x i1> [[EVLM3]], %m
+; ALL-CONVERT: [[SELONE3:%.+]] = select <vscale x 4 x i1> [[NEWM3]], <vscale x 4 x i32> %i1, <vscale x 4 x i32> splat (i32 1)
+; ALL-CONVERT: %{{.*}} = udiv <vscale x 4 x i32> %i0, [[SELONE3]]
+; ALL-CONVERT: [[EVLM4:%.+]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 %n)
+; ALL-CONVERT: [[NEWM4:%.+]] = and <vscale x 4 x i1> [[EVLM4]], %m
+; ALL-CONVERT: [[SELONE4:%.+]] = select <vscale x 4 x i1> [[NEWM4]], <vscale x 4 x i32> %i1, <vscale x 4 x i32> splat (i32 1)
+; ALL-CONVERT: %{{.*}} = urem <vscale x 4 x i32> %i0, [[SELONE4]]
+; ALL-CONVERT: %{{.+}} = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1)
+; ALL-CONVERT: %{{.+}} = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1)
+; ALL-CONVERT: %{{.+}} = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1)
+; ALL-CONVERT: %{{.+}} = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1)
+; ALL-CONVERT: %{{.*}} = and <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = or <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = xor <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = ashr <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = lshr <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: %{{.*}} = shl <vscale x 4 x i32> %i0, %i1
+; ALL-CONVERT: [[EVLM5:%.+]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 %n)
+; ALL-CONVERT: [[NEWM5:%.+]] = and <vscale x 4 x i1> [[EVLM5]], %m
+; ALL-CONVERT: %r11 = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> [[NEWM5]], <vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1, i32 %scalable_size{{.*}})
+; ALL-CONVERT-NEXT: ret void

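For context, the ALL-CONVERT lines above verify the output of the vector-predication expansion on a function built from llvm.vp.* intrinsics. The following is a minimal sketch of what that input plausibly looks like, reconstructed from the check lines; the value names, the subset of operations shown, and the use of %n as the EVL are assumptions rather than the literal test source.

declare <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)

define void @test_vp_int_vscale(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1, <vscale x 4 x i32> %i2, <vscale x 4 x i32> %f3, <vscale x 4 x i1> %m, i32 %n) {
  ; Speculatable arithmetic: the expansion drops mask and EVL and emits a plain add/sub/mul.
  %r0 = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1, <vscale x 4 x i1> %m, i32 %n)
  ; Trapping division: the %n EVL is folded into get.active.lane.mask, ANDed with %m, and
  ; masked-off lanes receive a safe divisor of 1 via the select checked above.
  %r3 = call <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1, <vscale x 4 x i1> %m, i32 %n)
  ; vp.merge survives expansion, but with the EVL legalized into the mask; the new EVL is the
  ; full element count, matching the %scalable_size value the checks tolerate with {{.*}}.
  %r11 = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %i0, <vscale x 4 x i32> %i1, i32 %n)
  ret void
}

The check pattern follows from this shape: the pass first folds the %n EVL into an active-lane mask, then legalizes the mask per opcode (dropped for speculatable ops, a divisor select for division and remainder, kept as an operand for vp.merge).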
; Check that reductions use the correct neutral element for masked-off elements
; ALL-CONVERT: define void @test_vp_reduce_int_v4(i32 %start, <4 x i32> %vi, <4 x i1> %m, i32 %n) {
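The reduction checks that follow this excerpt rely on masked-off lanes being replaced with the reducing operation's neutral element before an unpredicated reduction is taken. A rough sketch for the add case, assuming the test body calls @llvm.vp.reduce.add.v4i32 (names and exact structure are illustrative, not the literal check lines):

  ; Hypothetical input: a VP reduction with start value %start, mask %m, and EVL %n.
  %r = call i32 @llvm.vp.reduce.add.v4i32(i32 %start, <4 x i32> %vi, <4 x i1> %m, i32 %n)

  ; Expected expansion shape (with the EVL legalized into the mask as above): masked-off
  ; lanes get the operation's neutral element (0 for add), then a plain reduction is
  ; combined with the start value.
  %sel = select <4 x i1> %m, <4 x i32> %vi, <4 x i32> zeroinitializer
  %red = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sel)
  %res = add i32 %red, %start

Other reductions use different neutral values, e.g. 1 for mul, all-ones for and, and the smallest signed value for smax, which is what the comment above refers to.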