diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index 24528c5cdbdb0..632c549abca52 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -203,8 +203,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower(); getActionDefinitionsBuilder({G_ROTL, G_ROTR}) - .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), - {{s32, s32}, {sXLen, sXLen}}) + .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}}) + .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()), + {{s32, s32}}) .lower(); getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower(); @@ -1162,6 +1163,10 @@ static unsigned getRISCVWOpcode(unsigned Opcode) { switch (Opcode) { default: llvm_unreachable("Unexpected opcode"); + case TargetOpcode::G_ROTL: + return RISCV::G_ROLW; + case TargetOpcode::G_ROTR: + return RISCV::G_RORW; case TargetOpcode::G_CTLZ: return RISCV::G_CLZW; case TargetOpcode::G_CTTZ: @@ -1205,6 +1210,16 @@ bool RISCVLegalizerInfo::legalizeCustom( return Helper.lower(MI, 0, /* Unused hint type */ LLT()) == LegalizerHelper::Legalized; } + case TargetOpcode::G_ROTL: + case TargetOpcode::G_ROTR: { + Helper.Observer.changingInstr(MI); + Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT); + Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT); + Helper.widenScalarDst(MI, sXLen); + MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode()))); + Helper.Observer.changedInstr(MI); + return true; + } case TargetOpcode::G_CTLZ: case TargetOpcode::G_CTTZ: { Helper.Observer.changingInstr(MI); diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td index f312ab1a70ea0..e3267642ceeed 100644 --- a/llvm/lib/Target/RISCV/RISCVGISel.td +++ b/llvm/lib/Target/RISCV/RISCVGISel.td @@ -274,14 +274,6 @@ let Predicates = 
[HasStdExtZbbOrZbkb, IsRV64] in { def : Pat<(i32 (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>; def : Pat<(i32 (or GPR:$rs1, (not GPR:$rs2))), (ORN GPR:$rs1, GPR:$rs2)>; def : Pat<(i32 (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>; - -def : PatGprGpr<rotl, ROLW, i32, i32>; -def : PatGprGpr<rotr, RORW, i32, i32>; -def : Pat<(i32 (rotr GPR:$rs1, uimm5i32:$imm)), - (RORIW GPR:$rs1, (i64 (as_i64imm $imm)))>; - -def : Pat<(i32 (rotl GPR:$rs1, uimm5i32:$rs2)), - (RORIW GPR:$rs1, (ImmSubFrom32 uimm5i32:$rs2))>; } // Predicates = [HasStdExtZbbOrZbkb, IsRV64] let Predicates = [HasStdExtZba, IsRV64] in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td index 927811eef6618..424623360d255 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td +++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td @@ -17,6 +17,22 @@ class RISCVGenericInstruction : GenericInstruction { let Namespace = "RISCV"; } +// Pseudo equivalent to a RISCVISD::RORW. +def G_RORW : RISCVGenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src1, type0:$src2); + let hasSideEffects = false; +} +def : GINodeEquiv<G_RORW, riscv_rorw>; + +// Pseudo equivalent to a RISCVISD::ROLW. +def G_ROLW : RISCVGenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src1, type0:$src2); + let hasSideEffects = false; +} +def : GINodeEquiv<G_ROLW, riscv_rolw>; + // Pseudo equivalent to a RISCVISD::CLZW.
def G_CLZW : RISCVGenericInstruction { let OutOperandList = (outs type0:$dst); diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir index 50b96e0ee972e..edf7ef2203cbf 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir +++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir @@ -22,12 +22,9 @@ body: | ; CHECK-NEXT: $x10 = COPY [[ROLW]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:gprb(s64) = COPY $x10 - %1:gprb(s32) = G_TRUNC %0(s64) - %2:gprb(s64) = COPY $x11 - %6:gprb(s32) = G_TRUNC %2(s64) - %4:gprb(s32) = G_ROTL %1, %6(s32) - %5:gprb(s64) = G_ANYEXT %4(s32) - $x10 = COPY %5(s64) + %1:gprb(s64) = COPY $x11 + %2:gprb(s64) = G_ROLW %0, %1(s64) + $x10 = COPY %2(s64) PseudoRET implicit $x10 ... @@ -72,12 +69,9 @@ body: | ; CHECK-NEXT: $x10 = COPY [[RORW]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:gprb(s64) = COPY $x10 - %1:gprb(s32) = G_TRUNC %0(s64) - %2:gprb(s64) = COPY $x11 - %6:gprb(s32) = G_TRUNC %2(s64) - %4:gprb(s32) = G_ROTR %1, %6(s32) - %5:gprb(s64) = G_ANYEXT %4(s32) - $x10 = COPY %5(s64) + %1:gprb(s64) = COPY $x11 + %2:gprb(s64) = G_RORW %0, %1(s64) + $x10 = COPY %2(s64) PseudoRET implicit $x10 ... @@ -121,11 +115,9 @@ body: | ; CHECK-NEXT: $x10 = COPY [[RORIW]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:gprb(s64) = COPY $x10 - %1:gprb(s32) = G_TRUNC %0(s64) - %2:gprb(s32) = G_CONSTANT i32 15 - %3:gprb(s32) = G_ROTL %1, %2(s32) - %4:gprb(s64) = G_ANYEXT %3(s32) - $x10 = COPY %4(s64) + %1:gprb(s64) = G_CONSTANT i64 15 + %2:gprb(s64) = G_ROLW %0, %1(s64) + $x10 = COPY %2(s64) PseudoRET implicit $x10 ... 
@@ -169,11 +161,9 @@ body: | ; CHECK-NEXT: $x10 = COPY [[RORIW]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:gprb(s64) = COPY $x10 - %1:gprb(s32) = G_TRUNC %0(s64) - %2:gprb(s32) = G_CONSTANT i32 15 - %3:gprb(s32) = G_ROTR %1, %2(s32) - %4:gprb(s64) = G_ANYEXT %3(s32) - $x10 = COPY %4(s64) + %1:gprb(s64) = G_CONSTANT i64 15 + %2:gprb(s64) = G_RORW %0, %1(s64) + $x10 = COPY %2(s64) PseudoRET implicit $x10 ... diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir index 2334fe1015e2f..a0d23d891b14a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir @@ -109,12 +109,9 @@ body: | ; RV64ZBB_OR_RV64ZBKB: liveins: $x10, $x11 ; RV64ZBB_OR_RV64ZBKB-NEXT: {{ $}} ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[TRUNC]], [[TRUNC1]](s32) - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ROTL]](s32) - ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[ANYEXT]](s64) + ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROLW:%[0-9]+]]:_(s64) = G_ROLW [[COPY]], [[COPY1]] + ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[ROLW]](s64) ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10 %2:_(s64) = COPY $x10 %0:_(s32) = G_TRUNC %2(s64) @@ -268,12 +265,9 @@ body: | ; RV64ZBB_OR_RV64ZBKB: liveins: $x10, $x11 ; RV64ZBB_OR_RV64ZBKB-NEXT: {{ $}} ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = 
G_TRUNC [[COPY1]](s64) - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[TRUNC]], [[TRUNC1]](s32) - ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ROTR]](s32) - ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[ANYEXT]](s64) + ; RV64ZBB_OR_RV64ZBKB-NEXT: [[RORW:%[0-9]+]]:_(s64) = G_RORW [[COPY]], [[COPY1]] + ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[RORW]](s64) ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10 %2:_(s64) = COPY $x10 %0:_(s32) = G_TRUNC %2(s64) diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll index 3d78d15057ba4..d9b7f16131c35 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll @@ -166,7 +166,9 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ; ; RV64ZBB-ZBKB-LABEL: rol_i32_neg_constant_rhs: ; RV64ZBB-ZBKB: # %bb.0: -; RV64ZBB-ZBKB-NEXT: li a1, -2 +; RV64ZBB-ZBKB-NEXT: li a1, 1 +; RV64ZBB-ZBKB-NEXT: slli a1, a1, 32 +; RV64ZBB-ZBKB-NEXT: addi a1, a1, -2 ; RV64ZBB-ZBKB-NEXT: rolw a0, a1, a0 ; RV64ZBB-ZBKB-NEXT: ret %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a) @@ -250,7 +252,9 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ; ; RV64ZBB-ZBKB-LABEL: ror_i32_neg_constant_rhs: ; RV64ZBB-ZBKB: # %bb.0: -; RV64ZBB-ZBKB-NEXT: li a1, -2 +; RV64ZBB-ZBKB-NEXT: li a1, 1 +; RV64ZBB-ZBKB-NEXT: slli a1, a1, 32 +; RV64ZBB-ZBKB-NEXT: addi a1, a1, -2 ; RV64ZBB-ZBKB-NEXT: rorw a0, a1, a0 ; RV64ZBB-ZBKB-NEXT: ret %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)