@@ -318,10 +318,9 @@ define void @and_or_not_combine_v32i8(ptr %pa, ptr %pb, ptr %pv, ptr %dst) nounw
318
318
; CHECK-NEXT: xvld $xr1, $a2, 0
319
319
; CHECK-NEXT: xvld $xr2, $a1, 0
320
320
; CHECK-NEXT: xvseq.b $xr0, $xr1, $xr0
321
- ; CHECK-NEXT: xvxori.b $xr0, $xr0, 255
322
321
; CHECK-NEXT: xvseq.b $xr1, $xr1, $xr2
323
- ; CHECK-NEXT: xvorn.v $xr0, $xr0, $xr1
324
- ; CHECK-NEXT: xvandi.b $xr0, $xr0, 4
322
+ ; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
323
+ ; CHECK-NEXT: xvnori.b $xr0, $xr0, 251
325
324
; CHECK-NEXT: xvst $xr0, $a3, 0
326
325
; CHECK-NEXT: ret
327
326
%a = load <32 x i8>, ptr %pa
@@ -343,12 +342,10 @@ define void @and_or_not_combine_v16i16(ptr %pa, ptr %pb, ptr %pv, ptr %dst) noun
343
342
; CHECK-NEXT: xvld $xr1, $a2, 0
344
343
; CHECK-NEXT: xvld $xr2, $a1, 0
345
344
; CHECK-NEXT: xvseq.h $xr0, $xr1, $xr0
346
- ; CHECK-NEXT: xvrepli.b $xr3, -1
347
- ; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr3
348
345
; CHECK-NEXT: xvseq.h $xr1, $xr1, $xr2
349
- ; CHECK-NEXT: xvorn.v $xr0, $xr0, $xr1
350
- ; CHECK-NEXT: xvrepli.h $xr1, 4
351
346
; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
347
+ ; CHECK-NEXT: xvrepli.h $xr1, 4
348
+ ; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr1
352
349
; CHECK-NEXT: xvst $xr0, $a3, 0
353
350
; CHECK-NEXT: ret
354
351
%a = load <16 x i16>, ptr %pa
@@ -370,12 +367,10 @@ define void @and_or_not_combine_v8i32(ptr %pa, ptr %pb, ptr %pv, ptr %dst) nounw
370
367
; CHECK-NEXT: xvld $xr1, $a2, 0
371
368
; CHECK-NEXT: xvld $xr2, $a1, 0
372
369
; CHECK-NEXT: xvseq.w $xr0, $xr1, $xr0
373
- ; CHECK-NEXT: xvrepli.b $xr3, -1
374
- ; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr3
375
370
; CHECK-NEXT: xvseq.w $xr1, $xr1, $xr2
376
- ; CHECK-NEXT: xvorn.v $xr0, $xr0, $xr1
377
- ; CHECK-NEXT: xvrepli.w $xr1, 4
378
371
; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
372
+ ; CHECK-NEXT: xvrepli.w $xr1, 4
373
+ ; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr1
379
374
; CHECK-NEXT: xvst $xr0, $a3, 0
380
375
; CHECK-NEXT: ret
381
376
%a = load <8 x i32>, ptr %pa
@@ -397,12 +392,10 @@ define void @and_or_not_combine_v4i64(ptr %pa, ptr %pb, ptr %pv, ptr %dst) nounw
397
392
; CHECK-NEXT: xvld $xr1, $a2, 0
398
393
; CHECK-NEXT: xvld $xr2, $a1, 0
399
394
; CHECK-NEXT: xvseq.d $xr0, $xr1, $xr0
400
- ; CHECK-NEXT: xvrepli.b $xr3, -1
401
- ; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr3
402
395
; CHECK-NEXT: xvseq.d $xr1, $xr1, $xr2
403
- ; CHECK-NEXT: xvorn.v $xr0, $xr0, $xr1
404
- ; CHECK-NEXT: xvrepli.d $xr1, 4
405
396
; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1
397
+ ; CHECK-NEXT: xvrepli.d $xr1, 4
398
+ ; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr1
406
399
; CHECK-NEXT: xvst $xr0, $a3, 0
407
400
; CHECK-NEXT: ret
408
401
%a = load <4 x i64>, ptr %pa
@@ -421,9 +414,8 @@ define void @and_extract_subvector_not_combine_v32i8(ptr %pa, ptr %dst) nounwind
421
414
; CHECK-LABEL: and_extract_subvector_not_combine_v32i8:
422
415
; CHECK: # %bb.0:
423
416
; CHECK-NEXT: xvld $xr0, $a0, 0
424
- ; CHECK-NEXT: xvxori.b $xr0, $xr0, 255
425
417
; CHECK-NEXT: xvpermi.q $xr0, $xr0, 1
426
- ; CHECK-NEXT: vandi.b $vr0, $vr0, 4
418
+ ; CHECK-NEXT: vnori.b $vr0, $vr0, 251
427
419
; CHECK-NEXT: vst $vr0, $a1, 0
428
420
; CHECK-NEXT: ret
429
421
%a = load volatile <32 x i8>, ptr %pa
@@ -440,11 +432,9 @@ define void @and_extract_subvector_not_combine_v16i16(ptr %pa, ptr %dst) nounwin
440
432
; CHECK-LABEL: and_extract_subvector_not_combine_v16i16:
441
433
; CHECK: # %bb.0:
442
434
; CHECK-NEXT: xvld $xr0, $a0, 0
443
- ; CHECK-NEXT: xvrepli.b $xr1, -1
444
- ; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
445
435
; CHECK-NEXT: xvpermi.q $xr0, $xr0, 1
446
436
; CHECK-NEXT: vrepli.h $vr1, 4
447
- ; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
437
+ ; CHECK-NEXT: vandn.v $vr0, $vr0, $vr1
448
438
; CHECK-NEXT: vst $vr0, $a1, 0
449
439
; CHECK-NEXT: ret
450
440
%a = load volatile <16 x i16>, ptr %pa
@@ -460,11 +450,9 @@ define void @and_extract_subvector_not_combine_v8i32(ptr %pa, ptr %dst) nounwind
460
450
; CHECK-LABEL: and_extract_subvector_not_combine_v8i32:
461
451
; CHECK: # %bb.0:
462
452
; CHECK-NEXT: xvld $xr0, $a0, 0
463
- ; CHECK-NEXT: xvrepli.b $xr1, -1
464
- ; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
465
453
; CHECK-NEXT: xvpermi.q $xr0, $xr0, 1
466
454
; CHECK-NEXT: vrepli.w $vr1, 4
467
- ; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
455
+ ; CHECK-NEXT: vandn.v $vr0, $vr0, $vr1
468
456
; CHECK-NEXT: vst $vr0, $a1, 0
469
457
; CHECK-NEXT: ret
470
458
%a = load volatile <8 x i32>, ptr %pa
@@ -479,11 +467,9 @@ define void @and_extract_subvector_not_combine_v4i64(ptr %pa, ptr %dst) nounwind
479
467
; CHECK-LABEL: and_extract_subvector_not_combine_v4i64:
480
468
; CHECK: # %bb.0:
481
469
; CHECK-NEXT: xvld $xr0, $a0, 0
482
- ; CHECK-NEXT: xvrepli.b $xr1, -1
483
- ; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr1
484
470
; CHECK-NEXT: xvpermi.q $xr0, $xr0, 1
485
471
; CHECK-NEXT: vrepli.d $vr1, 4
486
- ; CHECK-NEXT: vand.v $vr0, $vr0, $vr1
472
+ ; CHECK-NEXT: vandn.v $vr0, $vr0, $vr1
487
473
; CHECK-NEXT: vst $vr0, $a1, 0
488
474
; CHECK-NEXT: ret
489
475
%a = load volatile <4 x i64>, ptr %pa
0 commit comments