diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c5d92d5034e8f..543b5a1489740 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28999,6 +28999,30 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
 }
 
+static SDValue LowerVectorCTLZ_GFNI(SDValue Op, const SDLoc &DL,
+                                    SelectionDAG &DAG,
+                                    const X86Subtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
+  SDValue Input = Op.getOperand(0);
+
+  assert(VT.isVector() && VT.getVectorElementType() == MVT::i8 &&
+         "Expected vXi8 input for GFNI-based CTLZ lowering");
+
+  SDValue Reversed = DAG.getNode(ISD::BITREVERSE, DL, VT, Input);
+
+  SDValue Neg = DAG.getNegative(Reversed, DL, VT);
+  SDValue Filtered = DAG.getNode(ISD::AND, DL, VT, Reversed, Neg);
+
+  MVT VT64 = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
+  SDValue CTTZConst = DAG.getConstant(0xAACCF0FF00000000ULL, DL, VT64);
+  SDValue CTTZMatrix = DAG.getBitcast(VT, CTTZConst);
+
+  SDValue LZCNT =
+      DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, Filtered, CTTZMatrix,
+                  DAG.getTargetConstant(8, DL, MVT::i8));
+  return LZCNT;
+}
+
 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
@@ -29007,6 +29031,9 @@ static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
   SDLoc dl(Op);
   unsigned Opc = Op.getOpcode();
 
+  if (VT.isVector() && VT.getScalarType() == MVT::i8 && Subtarget.hasGFNI())
+    return LowerVectorCTLZ_GFNI(Op, dl, DAG, Subtarget);
+
   if (VT.isVector())
     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
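Note (not part of the patch): the lowering uses GF2P8AFFINEQB twice. The BITREVERSE node lowers on GFNI targets to an affine transform with immediate 0 whose matrix reverses the bits of each byte, turning the leading-zero count into a trailing-zero count; Reversed & -Reversed then isolates the lowest set bit; and a second affine transform with the matrix 0xAACCF0FF00000000 and immediate 8 converts the resulting one-hot byte into its bit index, mapping a zero byte to 8. Below is a standalone scalar sketch of the same math — illustrative C++ only, not LLVM or Intel API; it assumes Intel's documented GF2P8AFFINEQB convention that result bit I is the parity of (matrix byte 7-I AND the source byte), XORed with bit I of the immediate:

// Scalar model of the new vXi8 CTLZ lowering (illustrative names only).
#include <cassert>
#include <cstdint>

// Emulates GF2P8AFFINEQB on a single byte: result bit I is the parity of
// (matrix byte 7-I AND Src), XORed with bit I of the immediate.
static uint8_t GF2P8Affine(uint64_t Matrix, uint8_t Src, uint8_t Imm) {
  uint8_t Result = 0;
  for (int I = 0; I < 8; ++I) {
    uint8_t Row = (Matrix >> (8 * (7 - I))) & 0xFF;
    Result |= (uint8_t)((__builtin_parity(Row & Src) ^ ((Imm >> I) & 1)) << I);
  }
  return Result;
}

int main() {
  // Matrix bytes [1,2,4,8,16,32,64,128], as in the $0 transforms in the
  // updated checks below.
  const uint64_t BitRevMatrix = 0x8040201008040201ULL;
  const uint64_t CTTZMatrix = 0xAACCF0FF00000000ULL; // the new constant
  for (unsigned V = 0; V != 256; ++V) {
    // Step 1: bit-reverse the byte, so ctlz becomes cttz.
    uint8_t Rev = GF2P8Affine(BitRevMatrix, (uint8_t)V, 0);
    // Step 2: isolate the lowest set bit; zero stays zero.
    uint8_t OneHot = Rev & (uint8_t)-Rev;
    // Step 3: the matrix maps 1<<K to K+8 and 0 to 0, so XORing in the
    // immediate 8 yields K, and 8 for a zero input.
    uint8_t CTLZ = GF2P8Affine(CTTZMatrix, OneHot, 8);
    assert(CTLZ == (V ? __builtin_clz(V) - 24u : 8u));
  }
  return 0;
}

The loop is exhaustive over all 256 byte values, so it also covers ctlz(0) == 8, which the i1 0 (zero-is-defined) test variants rely on.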
diff --git a/llvm/test/CodeGen/X86/gfni-lzcnt.ll b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
index 8e48950c32cd8..6e93f218f1c15 100644
--- a/llvm/test/CodeGen/X86/gfni-lzcnt.ll
+++ b/llvm/test/CodeGen/X86/gfni-lzcnt.ll
@@ -8,40 +8,29 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8:
 ; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT: movdqa %xmm1, %xmm2
-; GFNISSE-NEXT: pshufb %xmm0, %xmm2
 ; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT: pxor %xmm3, %xmm3
-; GFNISSE-NEXT: pcmpeqb %xmm0, %xmm3
-; GFNISSE-NEXT: pand %xmm2, %xmm3
-; GFNISSE-NEXT: pshufb %xmm0, %xmm1
-; GFNISSE-NEXT: paddb %xmm3, %xmm1
-; GFNISSE-NEXT: movdqa %xmm1, %xmm0
+; GFNISSE-NEXT: pxor %xmm1, %xmm1
+; GFNISSE-NEXT: psubb %xmm0, %xmm1
+; GFNISSE-NEXT: pand %xmm1, %xmm0
+; GFNISSE-NEXT: gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT: retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8:
 ; GFNIAVX1OR2: # %bb.0:
-; GFNIAVX1OR2-NEXT: vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1OR2-NEXT: vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX1OR2-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT: vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX1OR2-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT: retq
 ;
 ; GFNIAVX512-LABEL: testv16i8:
 ; GFNIAVX512: # %bb.0:
-; GFNIAVX512-NEXT: vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX512-NEXT: vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX512-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT: retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
@@ -50,40 +50,29 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv16i8u:
 ; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT: movdqa %xmm1, %xmm2
-; GFNISSE-NEXT: pshufb %xmm0, %xmm2
 ; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT: pxor %xmm3, %xmm3
-; GFNISSE-NEXT: pcmpeqb %xmm0, %xmm3
-; GFNISSE-NEXT: pand %xmm2, %xmm3
-; GFNISSE-NEXT: pshufb %xmm0, %xmm1
-; GFNISSE-NEXT: paddb %xmm3, %xmm1
-; GFNISSE-NEXT: movdqa %xmm1, %xmm0
+; GFNISSE-NEXT: pxor %xmm1, %xmm1
+; GFNISSE-NEXT: psubb %xmm0, %xmm1
+; GFNISSE-NEXT: pand %xmm1, %xmm0
+; GFNISSE-NEXT: gf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; GFNISSE-NEXT: retq
 ;
 ; GFNIAVX1OR2-LABEL: testv16i8u:
 ; GFNIAVX1OR2: # %bb.0:
-; GFNIAVX1OR2-NEXT: vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1OR2-NEXT: vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX1OR2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX1OR2-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT: vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX1OR2-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX1OR2-NEXT: retq
 ;
 ; GFNIAVX512-LABEL: testv16i8u:
 ; GFNIAVX512: # %bb.0:
-; GFNIAVX512-NEXT: vmovq {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm2
 ; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; GFNIAVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm3
-; GFNIAVX512-NEXT: vpand %xmm3, %xmm2, %xmm2
-; GFNIAVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; GFNIAVX512-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; GFNIAVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; GFNIAVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; GFNIAVX512-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; GFNIAVX512-NEXT: retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
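Note (not part of the patch): in the wider tests below, the constant-pool comments print the bit-index matrix as a splatted qword in decimal; it is the same 0xAACCF0FF00000000 constant, which this illustrative check confirms:

static_assert(12307476859704049664ULL == 0xAACCF0FF00000000ULL,
              "decimal rendering of the CTTZ matrix used in the checks");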
@@ -92,73 +70,53 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8:
 ; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT: movdqa %xmm2, %xmm3
-; GFNISSE-NEXT: pshufb %xmm0, %xmm3
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT: pxor %xmm5, %xmm5
-; GFNISSE-NEXT: movdqa %xmm2, %xmm6
-; GFNISSE-NEXT: pshufb %xmm0, %xmm6
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT: pand %xmm3, %xmm0
-; GFNISSE-NEXT: paddb %xmm6, %xmm0
-; GFNISSE-NEXT: movdqa %xmm2, %xmm3
-; GFNISSE-NEXT: pshufb %xmm1, %xmm3
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT: pcmpeqb %xmm1, %xmm5
-; GFNISSE-NEXT: pand %xmm3, %xmm5
-; GFNISSE-NEXT: pshufb %xmm1, %xmm2
-; GFNISSE-NEXT: paddb %xmm5, %xmm2
-; GFNISSE-NEXT: movdqa %xmm2, %xmm1
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT: pxor %xmm3, %xmm3
+; GFNISSE-NEXT: pxor %xmm4, %xmm4
+; GFNISSE-NEXT: psubb %xmm0, %xmm4
+; GFNISSE-NEXT: pand %xmm4, %xmm0
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm4, %xmm0
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE-NEXT: psubb %xmm1, %xmm3
+; GFNISSE-NEXT: pand %xmm3, %xmm1
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm4, %xmm1
 ; GFNISSE-NEXT: retq
 ;
 ; GFNIAVX1-LABEL: testv32i8:
 ; GFNIAVX1: # %bb.0:
 ; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT: vmovq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm3
-; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT: # xmm4 = mem[0,0]
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT: vpcmpeqb %xmm5, %xmm1, %xmm6
-; GFNIAVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm3
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX1-NEXT: vpcmpeqb %xmm5, %xmm0, %xmm4
-; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT: # xmm2 = mem[0,0]
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; GFNIAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; GFNIAVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT: vandps %ymm0, %ymm2, %ymm0
+; GFNIAVX1-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT: retq
 ;
 ; GFNIAVX2-LABEL: testv32i8:
 ; GFNIAVX2: # %bb.0:
-; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT: # ymm1 = mem[0,1,0,1]
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX2-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX2-NEXT: vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT: retq
 ;
 ; GFNIAVX512-LABEL: testv32i8:
 ; GFNIAVX512: # %bb.0:
-; GFNIAVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT: # ymm1 = mem[0,1,0,1]
-; GFNIAVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX512-NEXT: vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX512-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT: vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT: retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
@@ -167,73 +125,53 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv32i8u:
 ; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT: movdqa %xmm2, %xmm3
-; GFNISSE-NEXT: pshufb %xmm0, %xmm3
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
-; GFNISSE-NEXT: pxor %xmm5, %xmm5
-; GFNISSE-NEXT: movdqa %xmm2, %xmm6
-; GFNISSE-NEXT: pshufb %xmm0, %xmm6
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT: pand %xmm3, %xmm0
-; GFNISSE-NEXT: paddb %xmm6, %xmm0
-; GFNISSE-NEXT: movdqa %xmm2, %xmm3
-; GFNISSE-NEXT: pshufb %xmm1, %xmm3
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
-; GFNISSE-NEXT: pcmpeqb %xmm1, %xmm5
-; GFNISSE-NEXT: pand %xmm3, %xmm5
-; GFNISSE-NEXT: pshufb %xmm1, %xmm2
-; GFNISSE-NEXT: paddb %xmm5, %xmm2
-; GFNISSE-NEXT: movdqa %xmm2, %xmm1
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE-NEXT: pxor %xmm3, %xmm3
+; GFNISSE-NEXT: pxor %xmm4, %xmm4
+; GFNISSE-NEXT: psubb %xmm0, %xmm4
+; GFNISSE-NEXT: pand %xmm4, %xmm0
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm4, %xmm0
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE-NEXT: psubb %xmm1, %xmm3
+; GFNISSE-NEXT: pand %xmm3, %xmm1
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm4, %xmm1
 ; GFNISSE-NEXT: retq
 ;
 ; GFNIAVX1-LABEL: testv32i8u:
 ; GFNIAVX1: # %bb.0:
 ; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX1-NEXT: vmovq {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm3
-; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT: # xmm4 = mem[0,0]
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT: vpcmpeqb %xmm5, %xmm1, %xmm6
-; GFNIAVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
-; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm3
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX1-NEXT: vpcmpeqb %xmm5, %xmm0, %xmm4
-; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm0
-; GFNIAVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT: # xmm2 = mem[0,0]
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; GFNIAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; GFNIAVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT: vandps %ymm0, %ymm2, %ymm0
+; GFNIAVX1-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; GFNIAVX1-NEXT: retq
 ;
 ; GFNIAVX2-LABEL: testv32i8u:
 ; GFNIAVX2: # %bb.0:
-; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT: # ymm1 = mem[0,1,0,1]
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; GFNIAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX2-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX2-NEXT: vgf2p8affineqb $8, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT: retq
 ;
 ; GFNIAVX512-LABEL: testv32i8u:
 ; GFNIAVX512: # %bb.0:
-; GFNIAVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512-NEXT: # ymm1 = mem[0,1,0,1]
-; GFNIAVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
-; GFNIAVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; GFNIAVX512-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
-; GFNIAVX512-NEXT: vpand %ymm3, %ymm2, %ymm2
-; GFNIAVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; GFNIAVX512-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; GFNIAVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512-NEXT: vpsubb %ymm0, %ymm1, %ymm1
+; GFNIAVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; GFNIAVX512-NEXT: retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
@@ -242,130 +180,93 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8:
 ; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa %xmm3, %xmm4
-; GFNISSE-NEXT: movq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm0, %xmm7
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm0
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
 ; GFNISSE-NEXT: pxor %xmm5, %xmm5
-; GFNISSE-NEXT: movdqa %xmm3, %xmm8
-; GFNISSE-NEXT: pshufb %xmm0, %xmm8
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT: pand %xmm7, %xmm0
-; GFNISSE-NEXT: paddb %xmm8, %xmm0
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm1, %xmm7
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm1
-; GFNISSE-NEXT: movdqa %xmm3, %xmm8
-; GFNISSE-NEXT: pshufb %xmm1, %xmm8
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm1
+; GFNISSE-NEXT: pxor %xmm6, %xmm6
+; GFNISSE-NEXT: psubb %xmm0, %xmm6
+; GFNISSE-NEXT: pand %xmm6, %xmm0
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm0
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE-NEXT: pxor %xmm7, %xmm7
+; GFNISSE-NEXT: psubb %xmm1, %xmm7
 ; GFNISSE-NEXT: pand %xmm7, %xmm1
-; GFNISSE-NEXT: paddb %xmm8, %xmm1
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm2, %xmm7
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm2
-; GFNISSE-NEXT: movdqa %xmm3, %xmm8
-; GFNISSE-NEXT: pshufb %xmm2, %xmm8
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm2
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm1
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE-NEXT: pxor %xmm7, %xmm7
+; GFNISSE-NEXT: psubb %xmm2, %xmm7
 ; GFNISSE-NEXT: pand %xmm7, %xmm2
-; GFNISSE-NEXT: paddb %xmm8, %xmm2
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm4, %xmm7
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm4
-; GFNISSE-NEXT: pcmpeqb %xmm4, %xmm5
-; GFNISSE-NEXT: pand %xmm7, %xmm5
-; GFNISSE-NEXT: pshufb %xmm4, %xmm3
-; GFNISSE-NEXT: paddb %xmm5, %xmm3
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm2
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE-NEXT: psubb %xmm3, %xmm5
+; GFNISSE-NEXT: pand %xmm5, %xmm3
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm3
 ; GFNISSE-NEXT: retq
 ;
 ; GFNIAVX1-LABEL: testv64i8:
 ; GFNIAVX1: # %bb.0:
 ; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT: vmovq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT: # xmm5 = mem[0,0]
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm0, %xmm7
-; GFNIAVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm0
+; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT: # xmm3 = mem[0,0]
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; GFNIAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpsubb %xmm2, %xmm5, %xmm2
+; GFNIAVX1-NEXT: vpsubb %xmm0, %xmm5, %xmm0
 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm1, %xmm1
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm1, %xmm5
-; GFNIAVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm4, %xmm1
-; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT: vandps %ymm0, %ymm4, %ymm0
+; GFNIAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX1-NEXT: vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm3
+; GFNIAVX1-NEXT: vpsubb %xmm4, %xmm5, %xmm4
+; GFNIAVX1-NEXT: vpsubb %xmm1, %xmm5, %xmm1
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; GFNIAVX1-NEXT: vandps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT: vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT: retq
 ;
 ; GFNIAVX2-LABEL: testv64i8:
 ; GFNIAVX2: # %bb.0:
-; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT: # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX2-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm6
-; GFNIAVX2-NEXT: vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT: vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX2-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm4
-; GFNIAVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX2-NEXT: vpsubb %ymm0, %ymm3, %ymm4
+; GFNIAVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX2-NEXT: vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm2
+; GFNIAVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT: retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8:
 ; GFNIAVX512VL: # %bb.0:
 ; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512VL-NEXT: # ymm2 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX512VL-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm6
-; GFNIAVX512VL-NEXT: vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm3, %ymm1
-; GFNIAVX512VL-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT: vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512VL-NEXT: vpaddb %ymm0, %ymm3, %ymm0
+; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; GFNIAVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX512VL-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; GFNIAVX512VL-NEXT: vpsubb %ymm0, %ymm3, %ymm0
 ; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT: vpandq %zmm0, %zmm2, %zmm0
+; GFNIAVX512VL-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT: retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8:
 ; GFNIAVX512BW: # %bb.0:
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT: vptestnmb %zmm1, %zmm1, %k1
-; GFNIAVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT: vpshufb %zmm0, %zmm2, %zmm0 {%k1} {z}
-; GFNIAVX512BW-NEXT: vpshufb %zmm1, %zmm2, %zmm1
-; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT: retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
   ret <64 x i8> %out
@@ -374,130 +275,93 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; GFNISSE-LABEL: testv64i8u:
 ; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa %xmm3, %xmm4
-; GFNISSE-NEXT: movq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm0, %xmm7
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm0
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
 ; GFNISSE-NEXT: pxor %xmm5, %xmm5
-; GFNISSE-NEXT: movdqa %xmm3, %xmm8
-; GFNISSE-NEXT: pshufb %xmm0, %xmm8
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm0
-; GFNISSE-NEXT: pand %xmm7, %xmm0
-; GFNISSE-NEXT: paddb %xmm8, %xmm0
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm1, %xmm7
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm1
-; GFNISSE-NEXT: movdqa %xmm3, %xmm8
-; GFNISSE-NEXT: pshufb %xmm1, %xmm8
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm1
+; GFNISSE-NEXT: pxor %xmm6, %xmm6
+; GFNISSE-NEXT: psubb %xmm0, %xmm6
+; GFNISSE-NEXT: pand %xmm6, %xmm0
+; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [12307476859704049664,12307476859704049664]
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm0
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE-NEXT: pxor %xmm7, %xmm7
+; GFNISSE-NEXT: psubb %xmm1, %xmm7
 ; GFNISSE-NEXT: pand %xmm7, %xmm1
-; GFNISSE-NEXT: paddb %xmm8, %xmm1
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm2, %xmm7
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm2
-; GFNISSE-NEXT: movdqa %xmm3, %xmm8
-; GFNISSE-NEXT: pshufb %xmm2, %xmm8
-; GFNISSE-NEXT: pcmpeqb %xmm5, %xmm2
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm1
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE-NEXT: pxor %xmm7, %xmm7
+; GFNISSE-NEXT: psubb %xmm2, %xmm7
 ; GFNISSE-NEXT: pand %xmm7, %xmm2
-; GFNISSE-NEXT: paddb %xmm8, %xmm2
-; GFNISSE-NEXT: movdqa %xmm3, %xmm7
-; GFNISSE-NEXT: pshufb %xmm4, %xmm7
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm4
-; GFNISSE-NEXT: pcmpeqb %xmm4, %xmm5
-; GFNISSE-NEXT: pand %xmm7, %xmm5
-; GFNISSE-NEXT: pshufb %xmm4, %xmm3
-; GFNISSE-NEXT: paddb %xmm5, %xmm3
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm2
+; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE-NEXT: psubb %xmm3, %xmm5
+; GFNISSE-NEXT: pand %xmm5, %xmm3
+; GFNISSE-NEXT: gf2p8affineqb $8, %xmm6, %xmm3
 ; GFNISSE-NEXT: retq
 ;
 ; GFNIAVX1-LABEL: testv64i8u:
 ; GFNIAVX1: # %bb.0:
 ; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX1-NEXT: vmovq {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX1-NEXT: # xmm5 = mem[0,0]
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm0, %xmm0
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm0, %xmm7
-; GFNIAVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; GFNIAVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm0
+; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX1-NEXT: # xmm3 = mem[0,0]
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; GFNIAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpsubb %xmm2, %xmm5, %xmm2
+; GFNIAVX1-NEXT: vpsubb %xmm0, %xmm5, %xmm0
 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm2, %xmm7
-; GFNIAVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm4
-; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm1, %xmm1
-; GFNIAVX1-NEXT: vpcmpeqb %xmm6, %xmm1, %xmm5
-; GFNIAVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm4, %xmm1
-; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT: vandps %ymm0, %ymm4, %ymm0
+; GFNIAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX1-NEXT: vgf2p8affineqb $8, %ymm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm3
+; GFNIAVX1-NEXT: vpsubb %xmm4, %xmm5, %xmm4
+; GFNIAVX1-NEXT: vpsubb %xmm1, %xmm5, %xmm1
+; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; GFNIAVX1-NEXT: vandps %ymm1, %ymm3, %ymm1
+; GFNIAVX1-NEXT: vgf2p8affineqb $8, %ymm2, %ymm1, %ymm1
 ; GFNIAVX1-NEXT: retq
 ;
 ; GFNIAVX2-LABEL: testv64i8u:
 ; GFNIAVX2: # %bb.0:
-; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX2-NEXT: # ymm2 = mem[0,1,0,1]
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX2-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm6
-; GFNIAVX2-NEXT: vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX2-NEXT: vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX2-NEXT: vpaddb %ymm0, %ymm3, %ymm0
-; GFNIAVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX2-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm4
-; GFNIAVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm3, %ymm1
+; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX2-NEXT: vpsubb %ymm0, %ymm3, %ymm4
+; GFNIAVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [12307476859704049664,12307476859704049664,12307476859704049664,12307476859704049664]
+; GFNIAVX2-NEXT: vgf2p8affineqb $8, %ymm4, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm2
+; GFNIAVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vgf2p8affineqb $8, %ymm4, %ymm1, %ymm1
 ; GFNIAVX2-NEXT: retq
 ;
 ; GFNIAVX512VL-LABEL: testv64i8u:
 ; GFNIAVX512VL: # %bb.0:
 ; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512VL-NEXT: # ymm2 = mem[0,1,0,1]
-; GFNIAVX512VL-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
-; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; GFNIAVX512VL-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm6
-; GFNIAVX512VL-NEXT: vpand %ymm6, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm3, %ymm1
-; GFNIAVX512VL-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm0
-; GFNIAVX512VL-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT: vpshufb %ymm0, %ymm2, %ymm0
-; GFNIAVX512VL-NEXT: vpaddb %ymm0, %ymm3, %ymm0
+; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; GFNIAVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; GFNIAVX512VL-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; GFNIAVX512VL-NEXT: vpsubb %ymm0, %ymm3, %ymm0
 ; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; GFNIAVX512VL-NEXT: vpandq %zmm0, %zmm2, %zmm0
+; GFNIAVX512VL-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512VL-NEXT: retq
 ;
 ; GFNIAVX512BW-LABEL: testv64i8u:
 ; GFNIAVX512BW: # %bb.0:
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; GFNIAVX512BW-NEXT: vptestnmb %zmm1, %zmm1, %k1
-; GFNIAVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; GFNIAVX512BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; GFNIAVX512BW-NEXT: vpshufb %zmm0, %zmm2, %zmm0 {%k1} {z}
-; GFNIAVX512BW-NEXT: vpshufb %zmm1, %zmm2, %zmm1
-; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; GFNIAVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vgf2p8affineqb $8, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
 ; GFNIAVX512BW-NEXT: retq
   %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
   ret <64 x i8> %out