+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefixes=AVXIFMA
+
+; High-52 path
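+; vpmadd52huq computes acc + bits[103:52] of the 104-bit product of the low
+; 52 bits of each multiplicand. When known bits bound that product below
+; 2^52, the high half is known zero and the call should fold to the
+; accumulator.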
+
+declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+
+; High-52, 25x25 masked inputs, accumulator = 1, expected constant fold.
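+; Both multiplicands fit in 25 bits, so the product fits in 50 bits and its
+; high-52 half is known zero: the result is the accumulator, splat(1), and
+; the final 'and' with 1 leaves it unchanged.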
+define <2 x i64> @kb52h_128_mask25_and1(<2 x i64> %x, <2 x i64> %y) {
+; AVX512VL-LABEL: kb52h_128_mask25_and1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = [1,1]
+; AVX512VL-NEXT: # xmm0 = mem[0,0]
+; AVX512VL-NEXT: retq
+;
+; AVXIFMA-LABEL: kb52h_128_mask25_and1:
+; AVXIFMA: # %bb.0:
+; AVXIFMA-NEXT: vmovddup {{.*#+}} xmm0 = [1,1]
+; AVXIFMA-NEXT: # xmm0 = mem[0,0]
+; AVXIFMA-NEXT: retq
+ %mx = and <2 x i64> %x, splat (i64 33554431) ; (1<<25)-1
+ %my = and <2 x i64> %y, splat (i64 33554431) ; (1<<25)-1
+ %r = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(
+ <2 x i64> splat (i64 1),
+ <2 x i64> %mx,
+ <2 x i64> %my)
+ %ret = and <2 x i64> %r, splat (i64 1)
+ ret <2 x i64> %ret
+}
+
+; High-52, 25x26 masked inputs, accumulator = 1, expected constant fold.
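+; A 25-bit by 26-bit multiply gives a product of at most 51 bits, so the
+; high-52 half is again known zero and the result folds to splat(1).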
+define <4 x i64> @kb52h_256_mask25x26_acc1(<4 x i64> %x, <4 x i64> %y) {
+; AVX512VL-LABEL: kb52h_256_mask25x26_acc1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1,1,1,1]
+; AVX512VL-NEXT: retq
+;
+; AVXIFMA-LABEL: kb52h_256_mask25x26_acc1:
+; AVXIFMA: # %bb.0:
+; AVXIFMA-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1,1,1,1]
+; AVXIFMA-NEXT: retq
+ %mx = and <4 x i64> %x, splat (i64 33554431) ; (1<<25)-1
+ %my = and <4 x i64> %y, splat (i64 67108863) ; (1<<26)-1
+ %r = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(
+ <4 x i64> splat (i64 1),
+ <4 x i64> %mx, <4 x i64> %my)
+ ret <4 x i64> %r
+}
+
+; Low-52 path
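+; vpmadd52luq computes acc + bits[51:0] of the 104-bit product of the low 52
+; bits of each multiplicand. The masks below keep the product inside 52 bits,
+; so the low half is the exact product; the value itself is unknown, and the
+; intrinsic is expected to survive.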
+
+declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+
+; Low-52, 26x26 masked inputs, add with accumulator.
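+; 26-bit by 26-bit multiplicands: the product fits in 52 bits, so no product
+; bits are discarded; the instruction is still emitted because the result is
+; not a known constant.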
+define <2 x i64> @kb52l_128_mask26x26_add_intrin(<2 x i64> %x, <2 x i64> %y, <2 x i64> %acc) {
+; AVX512VL-LABEL: kb52l_128_mask26x26_add_intrin:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastq {{.*#+}} xmm3 = [67108863,67108863]
+; AVX512VL-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmadd52luq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0
+; AVX512VL-NEXT: retq
+;
+; AVXIFMA-LABEL: kb52l_128_mask26x26_add_intrin:
+; AVXIFMA: # %bb.0:
+; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} xmm3 = [67108863,67108863]
+; AVXIFMA-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVXIFMA-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVXIFMA-NEXT: {vex} vpmadd52luq %xmm1, %xmm0, %xmm2
+; AVXIFMA-NEXT: vmovdqa %xmm2, %xmm0
+; AVXIFMA-NEXT: retq
+ %xm = and <2 x i64> %x, splat (i64 67108863) ; (1<<26)-1
+ %ym = and <2 x i64> %y, splat (i64 67108863) ; (1<<26)-1
+ %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(
+ <2 x i64> %acc, <2 x i64> %xm, <2 x i64> %ym)
+ ret <2 x i64> %r
+}
+
+; Low-52, 50x2 masked inputs, add with accumulator.
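+; 50-bit by 2-bit multiplicands: 3 * ((1<<50)-1) < 1<<52, so the low-52 add
+; is again exact; as above, nothing folds and the instruction is emitted.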
+define <4 x i64> @kb52l_256_mask50x3_add_intrin(<4 x i64> %x, <4 x i64> %y, <4 x i64> %acc) {
+; AVX512VL-LABEL: kb52l_256_mask50x3_add_intrin:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; AVX512VL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
+; AVX512VL-NEXT: vpmadd52luq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVXIFMA-LABEL: kb52l_256_mask50x3_add_intrin:
+; AVXIFMA: # %bb.0:
+; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1125899906842623,1125899906842623,1125899906842623,1125899906842623]
+; AVXIFMA-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm3 = [3,3,3,3]
+; AVXIFMA-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm2
+; AVXIFMA-NEXT: vmovdqa %ymm2, %ymm0
+; AVXIFMA-NEXT: retq
+ %xm = and <4 x i64> %x, splat (i64 1125899906842623) ; (1<<50)-1
+ %ym = and <4 x i64> %y, splat (i64 3) ; (1<<2)-1
+ %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(
+ <4 x i64> %acc, <4 x i64> %xm, <4 x i64> %ym)
+ ret <4 x i64> %r
+}
+