34 changes: 34 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -67,6 +67,10 @@ class AArch64ExpandPseudo : public MachineFunctionPass {
TargetRegisterClass ContiguousClass,
TargetRegisterClass StridedClass,
unsigned ContiguousOpc, unsigned StridedOpc);
bool expandFormTuplePseudo(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI,
unsigned Size);
bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned BitSize);

@@ -1142,6 +1146,32 @@ bool AArch64ExpandPseudo::expandMultiVecPseudo(
return true;
}

bool AArch64ExpandPseudo::expandFormTuplePseudo(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI, unsigned Size) {
assert((Size == 2 || Size == 4) && "Invalid Tuple Size");
MachineInstr &MI = *MBBI;
Register ReturnTuple = MI.getOperand(0).getReg();

const TargetRegisterInfo *TRI =
MBB.getParent()->getSubtarget().getRegisterInfo();
for (unsigned i = 0; i < Size; i++) {
Collaborator (review comment): nit:
Suggested change:
-  for (unsigned i = 0; i < Size; i++) {
+  for (unsigned I = 0; I < Size; ++I) {

Register FormTupleOpReg = MI.getOperand(i + 1).getReg();
Register ReturnTupleSubReg =
TRI->getSubReg(ReturnTuple, AArch64::zsub0 + i);
// Add copies to ensure the subregisters remain in the correct order
// for any contiguous operation they are used by.
if (FormTupleOpReg != ReturnTupleSubReg)
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORR_ZZZ))
.addReg(ReturnTupleSubReg, RegState::Define)
.addReg(FormTupleOpReg)
.addReg(FormTupleOpReg);
}

MI.eraseFromParent();
return true;
}

/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
@@ -1724,6 +1754,10 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
return expandMultiVecPseudo(
MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 2);
case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 4);
}
return false;
}
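
For illustration, a minimal sketch of what this expansion produces (the pass runs after register allocation; the physical register assignments here are invented): any tuple element whose input already sits in the matching subregister is skipped, and the rest become ORR_ZZZ self-moves:

$z0_z1 = FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO $z0, $z5
; expands to the following, since $z0 already matches zsub0 of the
; tuple and only the second element needs a move (ORR Zd, Zn, Zn is
; the SVE vector move idiom):
$z1 = ORR_ZZZ $z5, $z5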
74 changes: 74 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8641,6 +8641,57 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
return ZExtBool;
}

// The FORM_TRANSPOSED_REG_TUPLE pseudo should only be used if the
// input operands are copy nodes where the source register is in a
// StridedOrContiguous class. For example:
//
// %3:zpr2stridedorcontiguous = LD1B_2Z_IMM_PSEUDO ..
// %4:zpr = COPY %3.zsub1:zpr2stridedorcontiguous
// %5:zpr = COPY %3.zsub0:zpr2stridedorcontiguous
// %6:zpr2stridedorcontiguous = LD1B_2Z_PSEUDO ..
// %7:zpr = COPY %6.zsub1:zpr2stridedorcontiguous
// %8:zpr = COPY %6.zsub0:zpr2stridedorcontiguous
// %9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO %5:zpr, %8:zpr
//
bool shouldUseFormStridedPseudo(MachineInstr &MI) {
MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();

MCRegister SubReg = MCRegister::NoRegister;
for (unsigned I = 1; I < MI.getNumOperands(); ++I) {
MachineOperand &MO = MI.getOperand(I);
assert(MO.isReg() && "Unexpected operand to FORM_TRANSPOSED_REG_TUPLE");

MachineOperand *Def = MRI.getOneDef(MO.getReg());
if (!Def || !Def->getParent()->isCopy())
return false;

const MachineOperand &CpySrc = Def->getParent()->getOperand(1);
MachineOperand *CopySrcOp = MRI.getOneDef(CpySrc.getReg());
unsigned OpSubReg = CpySrc.getSubReg();
if (SubReg == MCRegister::NoRegister)
SubReg = OpSubReg;
if (!CopySrcOp || !CopySrcOp->isReg() || OpSubReg != SubReg)
return false;

const TargetRegisterClass *RegClass = nullptr;
switch (MI.getNumOperands() - 1) {
case 2:
RegClass = &AArch64::ZPR2StridedOrContiguousRegClass;
break;
case 4:
RegClass = &AArch64::ZPR4StridedOrContiguousRegClass;
break;
default:
llvm_unreachable("Unexpected number of operands to pseudo.");
}

if (MRI.getRegClass(CopySrcOp->getReg()) != RegClass)
return false;
}

return true;
}

void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const {
// Live-in physreg copies that are glued to SMSTART are applied as
@@ -8666,6 +8717,29 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
}
}

if (MI.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
MI.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO) {
// If input values to the FORM_TRANSPOSED_REG_TUPLE pseudo aren't copies
// from a StridedOrContiguous class, fall back on REG_SEQUENCE node.
if (!shouldUseFormStridedPseudo(MI)) {
Collaborator (review comment): nit: Maybe fold those conditions into one, or otherwise bail out early if shouldUseFormStridedPseudo(MI) == true, to avoid a level of indentation?

static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
AArch64::zsub2, AArch64::zsub3};

const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
TII->get(TargetOpcode::REG_SEQUENCE),
MI.getOperand(0).getReg());

for (unsigned I = 1; I < MI.getNumOperands(); ++I) {
MIB.add(MI.getOperand(I));
MIB.addImm(SubRegs[I - 1]);
}

MI.eraseFromParent();
Collaborator (review comment): nit:
Suggested change:
-  MIB.addImm(SubRegs[I - 1]);
+  MIB.addImm(AArch64::zsub0 + (I - 1));
Then you can remove SubRegs[].

}
return;
}

// Add an implicit use of 'VG' for ADDXri/SUBXri, which are instructions that
// have nothing to do with VG, were it not that they are used to materialise a
// frame-address. If they contain a frame-index to a scalable vector, this
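
To illustrate the fallback path above, a minimal sketch in MIR (virtual register numbers borrowed from the shouldUseFormStridedPseudo comment): when the check fails, the hook rewrites the pseudo in place as the equivalent REG_SEQUENCE:

%9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO %5:zpr, %8:zpr
; becomes, via AdjustInstrPostInstrSelection:
%9:zpr2mul2 = REG_SEQUENCE %5:zpr, %subreg.zsub0, %8:zpr, %subreg.zsub1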
50 changes: 50 additions & 0 deletions llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1107,6 +1107,56 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}

// FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register allocation
// where a consecutive multi-vector tuple is constructed from the same indices
// of multiple strided loads. This may still result in unnecessary copies
// between the loads and the tuple. Here we try to return a hint to assign the
// contiguous ZPRMulReg starting at the same register as the first operand of
// the pseudo, which should be a subregister of the first strided load.
//
// For example, if the first strided load has been assigned $z16_z20_z24_z28
// and the operands of the pseudo are each accessing subregister zsub2, we
// should look through Order to find a contiguous register which
// begins with $z24 (i.e. $z24_z25_z26_z27).
//
bool AArch64RegisterInfo::getRegAllocationHints(
Register VirtReg, ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
const MachineRegisterInfo &MRI = MF.getRegInfo();

for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
continue;
Collaborator (review comment): nit: I wonder if we just want to bail out early at this point?

switch (MI.getOperand(1).getSubReg()) {
Collaborator (review comment): nit: please make a variable for MI.getOperand(1).getSubReg(), because it's used more than once.

case AArch64::zsub0:
case AArch64::zsub1:
case AArch64::zsub2:
case AArch64::zsub3:
break;
default:
continue;
}

// Look up the physical register mapped to the first operand of the pseudo.
Register FirstOpVirtReg = MI.getOperand(1).getReg();
if (!VRM->hasPhys(FirstOpVirtReg))
continue;

MCRegister TupleStartReg =
getSubReg(VRM->getPhys(FirstOpVirtReg), MI.getOperand(1).getSubReg());
for (unsigned I = 0; I < Order.size(); ++I)
if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
if (R == TupleStartReg)
Hints.push_back(Order[I]);
}

return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
VRM);
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
const MachineFunction &MF) const {
const auto &MFI = MF.getFrameInfo();
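
A worked instance of the hint logic above (all physical assignments invented): suppose the pseudo's operands use zsub2 and the virtual register map assigns the first operand's strided load to $z16_z20_z24_z28:

; TupleStartReg = getSubReg($z16_z20_z24_z28, zsub2) = $z24
; scanning Order: $z24_z25_z26_z27 has zsub0 == $z24, so it is
; pushed to Hints ahead of the default allocation order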
5 changes: 5 additions & 0 deletions llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -134,6 +134,11 @@ class AArch64RegisterInfo final : public AArch64GenRegisterInfo {
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const override;

bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF, const VirtRegMap *VRM,
const LiveRegMatrix *Matrix) const override;

unsigned getLocalAddressRegister(const MachineFunction &MF) const;
bool regNeedsCFI(unsigned Reg, unsigned &RegToUseForCFI) const;

18 changes: 16 additions & 2 deletions llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -34,6 +34,20 @@ def tileslicerange0s4 : ComplexPattern<i32, 2, "SelectSMETileSlice<0, 4>", []>;

def am_sme_indexed_b4 : ComplexPattern<iPTR, 2, "SelectAddrModeIndexedSVE<0,15>", [], [SDNPWantRoot]>;

def FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO :
Collaborator (review comment): I think this needs a description of why we add these pseudos, and a comment that we expand them to REG_SEQUENCE with the post-isel hook if they don't meet certain criteria.

Pseudo<(outs ZPR2Mul2:$tup),
(ins ZPR:$zn0, ZPR:$zn1), []>, Sched<[]> {
let hasSideEffects = 0;
let hasPostISelHook = 1;
}

def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO :
Pseudo<(outs ZPR4Mul4:$tup),
(ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>, Sched<[]> {
let hasSideEffects = 0;
let hasPostISelHook = 1;
}

def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>;
def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore,
[SDNPHasChain, SDNPSideEffect, SDNPMayLoad]>;
@@ -172,14 +186,14 @@ class SME2_ZA_TwoOp_VG2_Multi_Index_Pat<string name, SDPatternOperator intrinsic
Operand imm_ty, ComplexPattern tileslice>
: Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)), vt:$Zn1, vt:$Zn2, vt:$Zm, (i32 imm_ty:$i)),
(!cast<Instruction>(name # _PSEUDO) $base, $offset,
(REG_SEQUENCE ZPR2Mul2, vt:$Zn1, zsub0, vt:$Zn2, zsub1), zpr_ty:$Zm, imm_ty:$i)>;
(FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO vt:$Zn1, vt:$Zn2), zpr_ty:$Zm, imm_ty:$i)>;

class SME2_ZA_TwoOp_VG4_Multi_Index_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ZPRRegOp zpr_ty, ValueType vt,
Operand imm_ty, ComplexPattern tileslice>
: Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)),
vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4, vt:$Zm, (i32 imm_ty:$i)),
(!cast<Instruction>(name # _PSEUDO) $base, $offset,
(REG_SEQUENCE ZPR4Mul4, vt:$Zn1, zsub0, vt:$Zn2, zsub1, vt:$Zn3, zsub2, vt:$Zn4, zsub3),
(FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
zpr_ty:$Zm, imm_ty:$i)>;

class SME2_Sat_Shift_VG2_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt, Operand imm_ty>
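
Putting the pieces together, a hedged sketch of the flow these patterns set up (the register names %tup and %zn1..%zn4 are illustrative, not from the patch): isel now builds the multi-vector operand with the pseudo rather than a REG_SEQUENCE, and later passes decide its fate:

%tup:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %zn1:zpr, %zn2:zpr, %zn3:zpr, %zn4:zpr
; kept only if shouldUseFormStridedPseudo holds (all inputs copy the same
; zsubN of StridedOrContiguous tuples); otherwise rewritten to REG_SEQUENCE
; post-isel, and any surviving pseudo is expanded to ORR_ZZZ moves in
; AArch64ExpandPseudoInsts.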