diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 4e4241efd63d6..dcc279f0d34d7 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -3928,9 +3928,17 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
 
   SDLoc DL(Node);
 
-  // Truncate the shift amount.
-  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
-  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
+  if (NBits.getSimpleValueType() != MVT::i8) {
+    // Truncate the shift amount.
+    NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
+    insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
+  }
+
+  // Turn (i32)(x & imm8) into (i32)x & imm32.
+  ConstantSDNode *Imm = nullptr;
+  if (NBits->getOpcode() == ISD::AND)
+    if ((Imm = dyn_cast<ConstantSDNode>(NBits->getOperand(1))))
+      NBits = NBits->getOperand(0);
 
   // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
   // All the other bits are undefined, we do not care about them.
@@ -3945,6 +3953,13 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
                   0);
   insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
 
+  if (Imm) {
+    NBits =
+        CurDAG->getNode(ISD::AND, DL, MVT::i32, NBits,
+                        CurDAG->getConstant(Imm->getZExtValue(), DL, MVT::i32));
+    insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
+  }
+
   // We might have matched the amount of high bits to be cleared,
   // but we want the amount of low bits to be kept, so negate it then.
   if (NegateNBits) {
diff --git a/llvm/test/CodeGen/X86/extract-lowbits.ll b/llvm/test/CodeGen/X86/extract-lowbits.ll
index 848b920490ab8..85b242c22c47c 100644
--- a/llvm/test/CodeGen/X86/extract-lowbits.ll
+++ b/llvm/test/CodeGen/X86/extract-lowbits.ll
@@ -439,14 +439,14 @@ define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind {
 ;
 ; X64-BMI1-LABEL: bzhi64_a0_masked:
 ; X64-BMI1:       # %bb.0:
-; X64-BMI1-NEXT:    andb $63, %sil
+; X64-BMI1-NEXT:    andl $63, %esi
 ; X64-BMI1-NEXT:    shll $8, %esi
 ; X64-BMI1-NEXT:    bextrq %rsi, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
 ; X64-BMI2-LABEL: bzhi64_a0_masked:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    andb $63, %sil
+; X64-BMI2-NEXT:    andl $63, %esi
 ; X64-BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
 ; X64-BMI2-NEXT:    retq
   %numlowbits.masked = and i64 %numlowbits, 63