@@ -430,3 +430,80 @@ define void @and_or_not_combine_v4i64(ptr %pa, ptr %pb, ptr %pv, ptr %dst) nounw
430
430
store <4 x i64 > %and , ptr %dst
431
431
ret void
432
432
}
433
+
434
; Check that (and (extract_subvector (xnor a, -1)), imm) on the high half of a
; v32i8 LASX register stays a NOT (xvxori.b 255) + subvector extract
; (xvpermi.q) + LSX immediate-and (vandi.b), i.e. the andn combine does not
; fire across the extract_subvector.
define void @and_extract_subvector_not_combine_v32i8(ptr %pa, ptr %dst) nounwind {
; CHECK-LABEL: and_extract_subvector_not_combine_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvxori.b $xr0, $xr0, 255
; CHECK-NEXT:    xvpermi.q $xr0, $xr0, 1
; CHECK-NEXT:    vandi.b $vr0, $vr0, 4
; CHECK-NEXT:    vst $vr0, $a1, 0
; CHECK-NEXT:    ret
  ; volatile load keeps the 256-bit source load from being narrowed away.
  %a = load volatile <32 x i8>, ptr %pa
  %a.not = xor <32 x i8> %a, splat (i8 -1)
  ; Indices 16..31 select the high 128-bit half of the NOT-ed vector.
  %subv = shufflevector <32 x i8> %a.not, <32 x i8> poison,
                        <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
                                    i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %and = and <16 x i8> %subv, splat (i8 4)
  store <16 x i8> %and, ptr %dst
  ret void
}
452
+
453
; Same as the v32i8 case but for v16i16: the NOT is materialized with an
; all-ones splat (xvrepli.b -1) + xvxor.v, the high half is extracted with
; xvpermi.q, and the mask uses vrepli.h + vand.v since i16 lanes have no
; byte-immediate AND form.
define void @and_extract_subvector_not_combine_v16i16(ptr %pa, ptr %dst) nounwind {
; CHECK-LABEL: and_extract_subvector_not_combine_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvrepli.b $xr1, -1
; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.q $xr0, $xr0, 1
; CHECK-NEXT:    vrepli.h $vr1, 4
; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a1, 0
; CHECK-NEXT:    ret
  ; volatile load keeps the 256-bit source load from being narrowed away.
  %a = load volatile <16 x i16>, ptr %pa
  %a.not = xor <16 x i16> %a, splat (i16 -1)
  ; Indices 8..15 select the high 128-bit half of the NOT-ed vector.
  %subv = shufflevector <16 x i16> %a.not, <16 x i16> poison,
                        <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %and = and <8 x i16> %subv, splat (i16 4)
  store <8 x i16> %and, ptr %dst
  ret void
}
472
+
473
; v8i32 variant: NOT via xvrepli.b/xvxor.v, high-half extract via xvpermi.q,
; then vrepli.w + vand.v for the 32-bit-lane splat mask.
define void @and_extract_subvector_not_combine_v8i32(ptr %pa, ptr %dst) nounwind {
; CHECK-LABEL: and_extract_subvector_not_combine_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvrepli.b $xr1, -1
; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.q $xr0, $xr0, 1
; CHECK-NEXT:    vrepli.w $vr1, 4
; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a1, 0
; CHECK-NEXT:    ret
  ; volatile load keeps the 256-bit source load from being narrowed away.
  %a = load volatile <8 x i32>, ptr %pa
  %a.not = xor <8 x i32> %a, splat (i32 -1)
  ; Indices 4..7 select the high 128-bit half of the NOT-ed vector.
  %subv = shufflevector <8 x i32> %a.not, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %and = and <4 x i32> %subv, splat (i32 4)
  store <4 x i32> %and, ptr %dst
  ret void
}
491
+
492
; v4i64 variant: NOT via xvrepli.b/xvxor.v, high-half extract via xvpermi.q,
; then vrepli.d + vand.v for the 64-bit-lane splat mask.
define void @and_extract_subvector_not_combine_v4i64(ptr %pa, ptr %dst) nounwind {
; CHECK-LABEL: and_extract_subvector_not_combine_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvrepli.b $xr1, -1
; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.q $xr0, $xr0, 1
; CHECK-NEXT:    vrepli.d $vr1, 4
; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a1, 0
; CHECK-NEXT:    ret
  ; volatile load keeps the 256-bit source load from being narrowed away.
  %a = load volatile <4 x i64>, ptr %pa
  %a.not = xor <4 x i64> %a, splat (i64 -1)
  ; Indices 2..3 select the high 128-bit half of the NOT-ed vector.
  %subv = shufflevector <4 x i64> %a.not, <4 x i64> poison, <2 x i32> <i32 2, i32 3>
  %and = and <2 x i64> %subv, splat (i64 4)
  store <2 x i64> %and, ptr %dst
  ret void
}
0 commit comments