diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index deab638b7e546..26b8cb029c9a8 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -44746,7 +44746,10 @@ bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op, // Helper to peek through bitops/trunc/setcc to determine size of source vector. // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>. static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, - bool AllowTruncate) { + bool AllowTruncate, unsigned Depth) { + // Limit recursion. + if (Depth >= SelectionDAG::MaxRecursionDepth) + return false; switch (Src.getOpcode()) { case ISD::TRUNCATE: if (!AllowTruncate) @@ -44755,17 +44758,22 @@ static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, case ISD::SETCC: return Src.getOperand(0).getValueSizeInBits() == Size; case ISD::FREEZE: - return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate); + return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate, + Depth + 1); case ISD::AND: case ISD::XOR: case ISD::OR: - return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) && - checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate); + return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate, + Depth + 1) && + checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate, + Depth + 1); case ISD::SELECT: case ISD::VSELECT: return Src.getOperand(0).getScalarValueSizeInBits() == 1 && - checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) && - checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate); + checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate, + Depth + 1) && + checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate, + Depth + 1); case ISD::BUILD_VECTOR: return ISD::isBuildVectorAllZeros(Src.getNode()) || ISD::isBuildVectorAllOnes(Src.getNode()); @@ -44936,7 
+44944,7 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src, // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2)) // sign-extend to a 256-bit operation to avoid truncation. if (Subtarget.hasAVX() && - checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) { + checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2(), 0)) { SExtVT = MVT::v4i64; PropagateSExt = true; } @@ -44948,8 +44956,8 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src, // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over // 256-bit because the shuffle is cheaper than sign extending the result of // the compare. - if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) || - checkBitcastSrcVectorSize(Src, 512, true))) { + if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true, 0) || + checkBitcastSrcVectorSize(Src, 512, true, 0))) { SExtVT = MVT::v8i32; PropagateSExt = true; } @@ -44974,7 +44982,7 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src, break; } // Split if this is a <64 x i8> comparison result. - if (checkBitcastSrcVectorSize(Src, 512, false)) { + if (checkBitcastSrcVectorSize(Src, 512, false, 0)) { SExtVT = MVT::v64i8; break; }