@@ -264,3 +264,63 @@ define void @post_not_and_not_combine_v4i64(ptr %res, ptr %a, i64 %b) nounwind {
   store <4 x i64> %and, ptr %res
   ret void
 }
+
+define void @and_not_combine_splatimm_v32i8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: and_not_combine_splatimm_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.b $xr1, -4
+; CHECK-NEXT:    xvandn.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+  %v0 = load <32 x i8>, ptr %a0
+  %and = and <32 x i8> %v0, splat (i8 -4)
+  %xor = xor <32 x i8> %and, splat (i8 -4)
+  store <32 x i8> %xor, ptr %res
+  ret void
+}
+
+define void @and_not_combine_splatimm_v16i16(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: and_not_combine_splatimm_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.h $xr1, -4
+; CHECK-NEXT:    xvandn.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+  %v0 = load <16 x i16>, ptr %a0
+  %and = and <16 x i16> %v0, splat (i16 -4)
+  %xor = xor <16 x i16> %and, splat (i16 -4)
+  store <16 x i16> %xor, ptr %res
+  ret void
+}
+
+define void @and_not_combine_splatimm_v8i32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: and_not_combine_splatimm_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.w $xr1, -4
+; CHECK-NEXT:    xvandn.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+  %v0 = load <8 x i32>, ptr %a0
+  %and = and <8 x i32> %v0, splat (i32 -4)
+  %xor = xor <8 x i32> %and, splat (i32 -4)
+  store <8 x i32> %xor, ptr %res
+  ret void
+}
+
+define void @and_not_combine_splatimm_v4i64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: and_not_combine_splatimm_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.d $xr1, -4
+; CHECK-NEXT:    xvandn.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+  %v0 = load <4 x i64>, ptr %a0
+  %and = and <4 x i64> %v0, splat (i64 -4)
+  %xor = xor <4 x i64> %and, splat (i64 -4)
+  store <4 x i64> %xor, ptr %res
+  ret void
+}