diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 2f0d9de42b486..24bf7da4dadc0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -493,7 +493,15 @@ def LeadingOnesMask : PatLeaf<(imm), [{
   if (!N->hasOneUse())
     return false;
   return !isInt<32>(N->getSExtValue()) && isMask_64(~N->getSExtValue());
-}], TrailingZeros>;
+}], TrailingZeros> {
+  let GISelPredicateCode = [{
+    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
+      return false;
+    const auto &MO = MI.getOperand(1);
+    return !isInt<32>(MO.getCImm()->getSExtValue()) &&
+           isMask_64(~MO.getCImm()->getSExtValue());
+  }];
+}
 def TrailingOnesMask : PatLeaf<(imm), [{
   if (!N->hasOneUse())
     return false;
@@ -520,7 +528,17 @@ def LeadingOnesWMask : PatLeaf<(imm), [{
   int64_t Imm = N->getSExtValue();
   return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
          Imm != UINT64_C(0xffffffff);
-}], TrailingZeros>;
+}], TrailingZeros> {
+  let GISelPredicateCode = [{
+    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
+      return false;
+    const auto &MO = MI.getOperand(1);
+    int64_t Imm = MO.getCImm()->getSExtValue();
+    return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
+           Imm != UINT64_C(0xffffffff);
+  }];
+
+}
 
 //===----------------------------------------------------------------------===//
 // Instruction Formats
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
index 534fec21ce7c4..66eb4372aefad 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
@@ -169,11 +169,10 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: fsgnj_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srli a1, a1, 63
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
@@ -1354,12 +1353,11 @@ define double @fsgnjx_f64(double %x, double %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    li a3, 1023
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    slli a3, a3, 52
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    li a2, 1023
+; RV64I-NEXT:    srli a0, a0, 63
+; RV64I-NEXT:    slli a2, a2, 52
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
index 8d77d41ab6b45..1469d49e210e0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
@@ -721,11 +721,10 @@ define double @copysign_f64(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: copysign_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srli a1, a1, 63
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
index 2fc25fbb39bb9..eb48c90e14f80 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
@@ -108,12 +108,11 @@ define fp128 @fabs(fp128 %x) {
 define fp128 @fcopysign(fp128 %x, fp128 %y) {
 ; CHECK-LABEL: fcopysign:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, -1
 ; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    slli a2, a2, 63
+; CHECK-NEXT:    srli a3, a3, 63
 ; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    and a2, a3, a2
-; CHECK-NEXT:    or a1, a1, a2
+; CHECK-NEXT:    slli a3, a3, 63
+; CHECK-NEXT:    or a1, a1, a3
 ; CHECK-NEXT:    ret
   %a = call fp128 @llvm.copysign.f128(fp128 %x, fp128 %y)
   ret fp128 %a
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
index 993ba19caa6b4..736bb8fea599e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
@@ -105,22 +105,12 @@ define i64 @zextw_i64(i64 %a) nounwind {
 ; This makes sure targetShrinkDemandedConstant changes the and immmediate to
 ; allow zext.w or slli+srli.
 define i64 @zextw_demandedbits_i64(i64 %0) {
-; RV64I-LABEL: zextw_demandedbits_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 1
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    addi a1, a1, -2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    ori a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBA-LABEL: zextw_demandedbits_i64:
-; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    li a1, -2
-; RV64ZBA-NEXT:    zext.w a1, a1
-; RV64ZBA-NEXT:    and a0, a0, a1
-; RV64ZBA-NEXT:    ori a0, a0, 1
-; RV64ZBA-NEXT:    ret
+; CHECK-LABEL: zextw_demandedbits_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    srliw a0, a0, 1
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    ori a0, a0, 1
+; CHECK-NEXT:    ret
   %2 = and i64 %0, 4294967294
   %3 = or i64 %2, 1
   ret i64 %3
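
Note on the hook used in the RISCVInstrInfo.td hunks above: GISelPredicateCode attaches a GlobalISel predicate to a PatLeaf alongside its SelectionDAG predicate, so the same immediate-matching fragment fires in both selectors. The sketch below is illustrative only and is not part of this patch; it assumes, as the hunks above demonstrate, that the matched instruction is a G_CONSTANT whose operand 0 is the def register and whose operand 1 holds the ConstantInt (CImm), with MI and MRI supplied by the generated matcher.

// Hypothetical sketch, not from the patch: a PatLeaf usable by both selectors.
// The SelectionDAG body sees the ConstantSDNode as N; the GlobalISel body sees
// the G_CONSTANT as MI and reads the immediate from operand 1's CImm.
def ExampleLeadingOnesMask : PatLeaf<(imm), [{
  return isMask_64(~N->getSExtValue());  // DAG-side check
}]> {
  let GISelPredicateCode = [{
    // GISel-side equivalent of the check above.
    return isMask_64(~MI.getOperand(1).getCImm()->getSExtValue());
  }];
}

Without the GISel body the PatLeaf matches only in SelectionDAG, which is why the GlobalISel tests above previously materialized the leading-ones mask with li/slli plus an and, and after this patch select the srli/slli form instead.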