
Commit 09a4b76

[RISCV][GISel] Add manual isel for s8/s16/s32 load/store for the GPR bank.
GISel doesn't distinguish integer and FP loads and stores; we only know which is which after register bank selection. This results in s16/s32 loads/stores on the GPR register bank that need to be selected. Handling them required extra isel patterns that SDAG doesn't need, and required adding i16 and i32 to the GPR register class. Having i16/i32 on the GPR register class makes type inference in TableGen less effective, requiring explicit casts to be added to patterns. This patch removes the extra isel patterns and replaces them with custom instruction selection similar to what is done on AArch64. A future patch will remove i16 and i32 from the GPR register class. Stacked on #161774.
1 parent ee493c5 commit 09a4b76
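For illustration only, here is a minimal, self-contained C++ sketch of the size-to-opcode mapping the commit message describes (hypothetical names and string opcodes; the real change is the selectLoadStoreOp hunk in RISCVInstructionSelector.cpp below). A GPR-bank load or store is mapped purely by its memory access size, with the 1-byte load kept unsigned because Zcb has no c.lb.

```cpp
// Hypothetical, standalone sketch of the GPR-bank size->opcode mapping;
// opcode names are strings here instead of the real RISCV::* enumerators.
#include <cstdio>

static const char *selectGprLoadStoreOp(bool IsStore, unsigned OpSizeInBytes) {
  switch (OpSizeInBytes) {
  case 1:
    // Prefer the unsigned load: Zcb has no compressed c.lb.
    return IsStore ? "SB" : "LBU";
  case 2:
    return IsStore ? "SH" : "LH";
  case 4:
    return IsStore ? "SW" : "LW";
  case 8:
    return IsStore ? "SD" : "LD";
  }
  return nullptr; // unsupported size; selection fails
}

int main() {
  // An s16 load on the GPR bank (e.g. legal because of Zfh) now selects LH
  // directly instead of relying on an i16 LdPat in RISCVGISel.td.
  std::printf("s16 GPR load  -> %s\n", selectGprLoadStoreOp(/*IsStore=*/false, 2));
  std::printf("s32 GPR store -> %s\n", selectGprLoadStoreOp(/*IsStore=*/true, 4));
}
```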

File tree

5 files changed: +90 −59 lines changed

llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp

Lines changed: 83 additions & 0 deletions
@@ -675,6 +675,26 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
   CC = getRISCVCCFromICmp(Pred);
 }
 
+/// Select the RISC-V opcode for the G_LOAD or G_STORE operation \p GenericOpc,
+/// appropriate for the GPR register bank and of memory access size \p OpSize.
+/// \returns \p GenericOpc if the combination is unsupported.
+static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned OpSizeInBytes) {
+  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
+  switch (OpSizeInBytes) {
+  case 1:
+    // Prefer unsigned due to no c.lb in Zcb.
+    return IsStore ? RISCV::SB : RISCV::LBU;
+  case 2:
+    return IsStore ? RISCV::SH : RISCV::LH;
+  case 4:
+    return IsStore ? RISCV::SW : RISCV::LW;
+  case 8:
+    return IsStore ? RISCV::SD : RISCV::LD;
+  }
+
+  return GenericOpc;
+}
+
 bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineIRBuilder MIB(MI);
 
@@ -836,6 +856,69 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return selectImplicitDef(MI, MIB);
   case TargetOpcode::G_UNMERGE_VALUES:
     return selectUnmergeValues(MI, MIB);
+  case TargetOpcode::G_LOAD:
+  case TargetOpcode::G_STORE: {
+    GLoadStore &LdSt = cast<GLoadStore>(MI);
+    const Register ValReg = LdSt.getReg(0);
+    const Register PtrReg = LdSt.getPointerReg();
+    LLT PtrTy = MRI->getType(PtrReg);
+
+    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
+    if (RB.getID() != RISCV::GPRBRegBankID)
+      return false;
+
+#ifndef NDEBUG
+    const RegisterBank &PtrRB =
+        *RBI.getRegBank(PtrReg, *MRI, TRI);
+    // Check that the pointer register is valid.
+    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
+           "Load/Store pointer operand isn't a GPR");
+    assert(PtrTy.isPointer() &&
+           "Load/Store pointer operand isn't a pointer");
+#endif
+
+    // Can only handle AddressSpace 0.
+    if (PtrTy.getAddressSpace() != 0)
+      return false;
+
+    unsigned MemSizeInBytes = LdSt.getMemSize().getValue();
+    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
+
+    if (isStrongerThanMonotonic(Order)) {
+      assert(MemSizeInBytes <= 8 && "Unexpected mem size!");
+      static constexpr unsigned LoadOpcodes[] = {
+          RISCV::LB_AQ, RISCV::LH_AQ, RISCV::LW_AQ, RISCV::LD_AQ
+      };
+      static constexpr unsigned StoreOpcodes[] = {
+          RISCV::SB_RL, RISCV::SH_RL, RISCV::SW_RL, RISCV::SD_RL
+      };
+      ArrayRef<unsigned> Opcodes = isa<GLoad>(LdSt) ? LoadOpcodes : StoreOpcodes;
+      MI.setDesc(TII.get(Opcodes[Log2_32(MemSizeInBytes)]));
+      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+    }
+
+    const unsigned NewOpc = selectLoadStoreOp(MI.getOpcode(), MemSizeInBytes);
+    if (NewOpc == MI.getOpcode())
+      return false;
+
+    // Check if we can fold anything into the addressing mode.
+    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
+    if (!AddrModeFns)
+      return false;
+
+    // Folded something. Create a new instruction and return it.
+    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
+    if (isa<GStore>(MI))
+      NewInst.addUse(ValReg);
+    else
+      NewInst.addDef(ValReg);
+    NewInst.cloneMemRefs(MI);
+    for (auto &Fn : *AddrModeFns)
+      Fn(NewInst);
+    MI.eraseFromParent();
+
+    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
+  }
  default:
    return false;
  }
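As a side note, the tail of this hunk uses the callback-rendering idiom: the addressing-mode selector returns a list of functions, and each one appends one operand (base register, then immediate offset) to the newly built instruction. Below is a hedged, standalone sketch of that idiom using simplified stand-ins; InstBuilderStub and makeAddrRegImm are hypothetical substitutes for LLVM's MachineInstrBuilder and selectAddrRegImm, not the real classes.

```cpp
// Simplified stand-ins only -- illustrates the
// `for (auto &Fn : *AddrModeFns) Fn(NewInst);` idiom above.
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct InstBuilderStub {                 // stand-in for MachineInstrBuilder
  std::vector<std::string> Operands;
  void addOperand(std::string Op) { Operands.push_back(std::move(Op)); }
};

using RendererFns =
    std::optional<std::vector<std::function<void(InstBuilderStub &)>>>;

// Stand-in for the addressing-mode selector: "fold" a base register plus a
// zero offset into two rendering callbacks.
static RendererFns makeAddrRegImm(std::string BaseReg) {
  std::vector<std::function<void(InstBuilderStub &)>> Fns;
  Fns.push_back([BaseReg](InstBuilderStub &B) { B.addOperand(BaseReg); });
  Fns.push_back([](InstBuilderStub &B) { B.addOperand("0"); });
  return Fns;
}

int main() {
  InstBuilderStub NewInst;               // imagine this is the freshly built LH
  NewInst.addOperand("$dst");
  if (auto AddrModeFns = makeAddrRegImm("$base"))
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);                       // renders: $dst $base 0
  for (const auto &Op : NewInst.Operands)
    std::cout << Op << ' ';
  std::cout << '\n';
}
```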

llvm/lib/Target/RISCV/RISCVGISel.td

Lines changed: 0 additions & 54 deletions
@@ -100,39 +100,11 @@ def : LdPat<load, LD, PtrVT>;
 def : StPat<store, SD, GPR, PtrVT>;
 }
 
-// Load and store patterns for i16, needed because Zfh makes s16 load/store
-// legal and regbank select may not constrain registers to FP.
-def : LdPat<load, LH, i16>;
-def : StPat<store, SH, GPR, i16>;
-
-def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb.
-def : StPat<truncstorei8, SB, GPR, i16>;
-
-let Predicates = [HasAtomicLdSt] in {
-// Prefer unsigned due to no c.lb in Zcb.
-def : LdPat<relaxed_load<atomic_load_aext_8>, LBU, i16>;
-def : LdPat<relaxed_load<atomic_load_nonext_16>, LH, i16>;
-
-def : StPat<relaxed_store<atomic_store_8>, SB, GPR, i16>;
-def : StPat<relaxed_store<atomic_store_16>, SH, GPR, i16>;
-}
-
-let Predicates = [HasAtomicLdSt, IsRV64] in {
-// Load pattern is in RISCVInstrInfoA.td and shared with RV32.
-def : StPat<relaxed_store<atomic_store_32>, SW, GPR, i32>;
-}
-
 //===----------------------------------------------------------------------===//
 // RV64 i32 patterns not used by SelectionDAG
 //===----------------------------------------------------------------------===//
 
 let Predicates = [IsRV64] in {
-def : LdPat<extloadi8, LBU, i32>; // Prefer unsigned due to no c.lb in Zcb.
-def : LdPat<extloadi16, LH, i32>;
-
-def : StPat<truncstorei8, SB, GPR, i32>;
-def : StPat<truncstorei16, SH, GPR, i32>;
-
 def : Pat<(sext (i32 GPR:$src)), (ADDIW GPR:$src, 0)>;
 
 def : Pat<(sext_inreg (i64 (add GPR:$rs1, simm12_lo:$imm)), i32),

@@ -190,29 +162,3 @@ let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in {
 def : Pat<(i64 (zext (i16 GPR:$rs))), (PACKW GPR:$rs, (XLenVT X0))>;
 def : Pat<(i32 (zext (i16 GPR:$rs))), (PACKW GPR:$rs, (XLenVT X0))>;
 }
-
-//===----------------------------------------------------------------------===//
-// Zalasr patterns not used by SelectionDAG
-//===----------------------------------------------------------------------===//
-
-let Predicates = [HasStdExtZalasr] in {
-// the sequentially consistent loads use
-// .aq instead of .aqrl to match the psABI/A.7
-def : PatLAQ<acquiring_load<atomic_load_aext_8>, LB_AQ, i16>;
-def : PatLAQ<seq_cst_load<atomic_load_aext_8>, LB_AQ, i16>;
-
-def : PatLAQ<acquiring_load<atomic_load_nonext_16>, LH_AQ, i16>;
-def : PatLAQ<seq_cst_load<atomic_load_nonext_16>, LH_AQ, i16>;
-
-def : PatSRL<releasing_store<atomic_store_8>, SB_RL, i16>;
-def : PatSRL<seq_cst_store<atomic_store_8>, SB_RL, i16>;
-
-def : PatSRL<releasing_store<atomic_store_16>, SH_RL, i16>;
-def : PatSRL<seq_cst_store<atomic_store_16>, SH_RL, i16>;
-}
-
-let Predicates = [HasStdExtZalasr, IsRV64] in {
-// Load pattern is in RISCVInstrInfoZalasr.td and shared with RV32.
-def : PatSRL<releasing_store<atomic_store_32>, SW_RL, i32>;
-def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL, i32>;
-}

llvm/lib/Target/RISCV/RISCVInstrInfo.td

Lines changed: 2 additions & 2 deletions
@@ -1980,7 +1980,7 @@ def : LdPat<sextloadi8, LB>;
 def : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
 def : LdPat<sextloadi16, LH>;
 def : LdPat<extloadi16, LH>;
-def : LdPat<load, LW, i32>;
+def : LdPat<load, LW, i32>, Requires<[IsRV32]>;
 def : LdPat<zextloadi8, LBU>;
 def : LdPat<zextloadi16, LHU>;
 

@@ -1994,7 +1994,7 @@ class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
 
 def : StPat<truncstorei8, SB, GPR, XLenVT>;
 def : StPat<truncstorei16, SH, GPR, XLenVT>;
-def : StPat<store, SW, GPR, i32>;
+def : StPat<store, SW, GPR, i32>, Requires<[IsRV32]>;
 
 /// Fences
 
llvm/lib/Target/RISCV/RISCVInstrInfoA.td

Lines changed: 2 additions & 1 deletion
@@ -174,8 +174,9 @@ let Predicates = [HasAtomicLdSt] in {
 def : StPat<relaxed_store<atomic_store_8>, SB, GPR, XLenVT>;
 def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
 def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
+}
 
-// Used by GISel for RV32 and RV64.
+let Predicates = [HasAtomicLdSt, IsRV32] in {
 def : LdPat<relaxed_load<atomic_load_nonext_32>, LW, i32>;
 }
 
llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td

Lines changed: 3 additions & 2 deletions
@@ -93,11 +93,12 @@ let Predicates = [HasStdExtZalasr] in {
 
 def : PatSRL<releasing_store<atomic_store_32>, SW_RL>;
 def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL>;
+} // Predicates = [HasStdExtZalasr]
 
-// Used by GISel for RV32 and RV64.
+let Predicates = [HasStdExtZalasr, IsRV32] in {
 def : PatLAQ<acquiring_load<atomic_load_nonext_32>, LW_AQ, i32>;
 def : PatLAQ<seq_cst_load<atomic_load_nonext_32>, LW_AQ, i32>;
-} // Predicates = [HasStdExtZalasr]
+} // Predicates = [HasStdExtZalasr, IsRV32]
 
 let Predicates = [HasStdExtZalasr, IsRV64] in {
 def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ, i64>;
