Skip to content

Commit f840ecf

Browse files
committed
[Codegen] Add a separate stack ID for scalable predicates
This splits out "ScalablePredicateVector" from the "ScalableVector" StackID. This is primarily to allow easy differentiation between vectors and predicates (without inspecting instructions). This new stack ID is not used in many places yet, but will be used in a later patch to mark stack slots that are known to contain predicates. Change-Id: I92c4c96af517ab2cfcf0a6eb9a853c2bac9de342
1 parent dcd0a2e commit f840ecf

17 files changed

+71
-54
lines changed

llvm/include/llvm/CodeGen/MIRYamlMapping.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -378,6 +378,8 @@ struct ScalarEnumerationTraits<TargetStackID::Value> {
378378
IO.enumCase(ID, "default", TargetStackID::Default);
379379
IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
380380
IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
381+
IO.enumCase(ID, "scalable-predicate-vector",
382+
TargetStackID::ScalablePredicateVector);
381383
IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
382384
IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
383385
}

llvm/include/llvm/CodeGen/MachineFrameInfo.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -494,7 +494,14 @@ class MachineFrameInfo {
494494
/// Should this stack ID be considered in MaxAlignment.
495495
bool contributesToMaxAlignment(uint8_t StackID) {
496496
return StackID == TargetStackID::Default ||
497-
StackID == TargetStackID::ScalableVector;
497+
StackID == TargetStackID::ScalableVector ||
498+
StackID == TargetStackID::ScalablePredicateVector;
499+
}
500+
501+
bool isScalableStackID(int ObjectIdx) const {
502+
uint8_t StackID = getStackID(ObjectIdx);
503+
return StackID == TargetStackID::ScalableVector ||
504+
StackID == TargetStackID::ScalablePredicateVector;
498505
}
499506

500507
/// setObjectAlignment - Change the alignment of the specified stack object.

llvm/include/llvm/CodeGen/TargetFrameLowering.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ enum Value {
3232
SGPRSpill = 1,
3333
ScalableVector = 2,
3434
WasmLocal = 3,
35+
ScalablePredicateVector = 4,
3536
NoAlloc = 255
3637
};
3738
}

llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis {
7272
: Slot(Idx), Size(MFI.getObjectSize(Idx)),
7373
Align(MFI.getObjectAlign(Idx).value()), Offset(Offset),
7474
SlotTy(Invalid), Scalable(false) {
75-
Scalable = MFI.getStackID(Idx) == TargetStackID::ScalableVector;
75+
Scalable = MFI.isScalableStackID(Idx);
7676
if (MFI.isSpillSlotObjectIndex(Idx))
7777
SlotTy = SlotType::Spill;
7878
else if (MFI.isFixedObjectIndex(Idx))

llvm/lib/Target/AArch64/AArch64FrameLowering.cpp

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ AArch64FrameLowering::getArgumentStackToRestore(MachineFunction &MF,
324324
static bool produceCompactUnwindFrame(const AArch64FrameLowering &,
325325
MachineFunction &MF);
326326

327-
// Conservatively, returns true if the function is likely to have an SVE vectors
327+
// Conservatively, returns true if the function is likely to have SVE vectors
328328
// on the stack. This function is safe to be called before callee-saves or
329329
// object offsets have been determined.
330330
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
@@ -338,7 +338,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
338338

339339
const MachineFrameInfo &MFI = MF.getFrameInfo();
340340
for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
341-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
341+
if (MFI.isScalableStackID(FI))
342342
return true;
343343
}
344344

@@ -1228,7 +1228,7 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
12281228
const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
12291229
bool FPAfterSVECalleeSaves =
12301230
isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
1231-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
1231+
if (MFI.isScalableStackID(FI)) {
12321232
if (FPAfterSVECalleeSaves &&
12331233
-ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
12341234
return StackOffset::getScalable(ObjectOffset);
@@ -1294,7 +1294,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
12941294
const auto &MFI = MF.getFrameInfo();
12951295
int64_t ObjectOffset = MFI.getObjectOffset(FI);
12961296
bool isFixed = MFI.isFixedObjectIndex(FI);
1297-
bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
1297+
bool isSVE = MFI.isScalableStackID(FI);
12981298
return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
12991299
PreferFP, ForSimm);
13001300
}
@@ -2021,10 +2021,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
20212021
}
20222022
// Update the StackIDs of the SVE stack slots.
20232023
MachineFrameInfo &MFI = MF.getFrameInfo();
2024-
if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
2024+
if (RPI.Type == RegPairInfo::ZPR) {
20252025
MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
20262026
if (RPI.isPaired())
20272027
MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
2028+
} else if (RPI.Type == RegPairInfo::PPR) {
2029+
MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector);
2030+
if (RPI.isPaired())
2031+
MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector);
20282032
}
20292033
}
20302034
return true;
@@ -2232,8 +2236,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
22322236
for (auto &MI : MBB) {
22332237
std::optional<int> FI = getLdStFrameID(MI, MFI);
22342238
if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
2235-
if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
2236-
AArch64InstrInfo::isFpOrNEON(MI))
2239+
if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
22372240
FrameObjects[*FI] |= 2;
22382241
else
22392242
FrameObjects[*FI] |= 1;
@@ -2678,7 +2681,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
26782681
#ifndef NDEBUG
26792682
// First process all fixed stack objects.
26802683
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
2681-
assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
2684+
assert(!MFI.isScalableStackID(I) &&
26822685
"SVE vectors should never be passed on the stack by value, only by "
26832686
"reference.");
26842687
#endif
@@ -2712,12 +2715,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
27122715
int StackProtectorFI = -1;
27132716
if (MFI.hasStackProtectorIndex()) {
27142717
StackProtectorFI = MFI.getStackProtectorIndex();
2715-
if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
2718+
if (MFI.isScalableStackID(StackProtectorFI))
27162719
ObjectsToAllocate.push_back(StackProtectorFI);
27172720
}
27182721
for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
2719-
unsigned StackID = MFI.getStackID(I);
2720-
if (StackID != TargetStackID::ScalableVector)
2722+
if (!MFI.isScalableStackID(I))
27212723
continue;
27222724
if (I == StackProtectorFI)
27232725
continue;
@@ -3721,8 +3723,7 @@ void AArch64FrameLowering::orderFrameObjects(
37213723
if (AFI.hasStackHazardSlotIndex()) {
37223724
std::optional<int> FI = getLdStFrameID(MI, MFI);
37233725
if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
3724-
if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
3725-
AArch64InstrInfo::isFpOrNEON(MI))
3726+
if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
37263727
FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
37273728
else
37283729
FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -4080,7 +4081,7 @@ void AArch64FrameLowering::emitRemarks(
40804081
}
40814082

40824083
unsigned RegTy = StackAccess::AccessType::GPR;
4083-
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
4084+
if (MFI.isScalableStackID(FrameIdx)) {
40844085
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
40854086
// spill/fill the predicate as a data vector (so are an FPR access).
40864087
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&

llvm/lib/Target/AArch64/AArch64FrameLowering.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,7 @@ class AArch64FrameLowering : public TargetFrameLowering {
124124
return false;
125125
case TargetStackID::Default:
126126
case TargetStackID::ScalableVector:
127+
case TargetStackID::ScalablePredicateVector:
127128
case TargetStackID::NoAlloc:
128129
return true;
129130
}
@@ -132,7 +133,8 @@ class AArch64FrameLowering : public TargetFrameLowering {
132133
bool isStackIdSafeForLocalArea(unsigned StackId) const override {
133134
// We don't support putting SVE objects into the pre-allocated local
134135
// frame block at the moment.
135-
return StackId != TargetStackID::ScalableVector;
136+
return (StackId != TargetStackID::ScalableVector &&
137+
StackId != TargetStackID::ScalablePredicateVector);
136138
}
137139

138140
void

llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7515,7 +7515,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
75157515
int FI = cast<FrameIndexSDNode>(N)->getIndex();
75167516
// We can only encode VL scaled offsets, so only fold in frame indexes
75177517
// referencing SVE objects.
7518-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
7518+
if (MFI.isScalableStackID(FI)) {
75197519
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
75207520
OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
75217521
return true;
@@ -7561,7 +7561,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
75617561
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
75627562
// We can only encode VL scaled offsets, so only fold in frame indexes
75637563
// referencing SVE objects.
7564-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
7564+
if (MFI.isScalableStackID(FI))
75657565
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
75667566
}
75677567

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9171,8 +9171,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
91719171
(MI.getOpcode() == AArch64::ADDXri ||
91729172
MI.getOpcode() == AArch64::SUBXri)) {
91739173
const MachineOperand &MO = MI.getOperand(1);
9174-
if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) ==
9175-
TargetStackID::ScalableVector)
9174+
if (MO.isFI() && MF.getFrameInfo().isScalableStackID(MO.getIndex()))
91769175
MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false,
91779176
/*IsImplicit=*/true));
91789177
}
@@ -9643,8 +9642,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
96439642
Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
96449643
MachineFrameInfo &MFI = MF.getFrameInfo();
96459644
int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
9646-
if (isScalable)
9647-
MFI.setStackID(FI, TargetStackID::ScalableVector);
9645+
if (isScalable) {
9646+
bool IsPred = VA.getValVT() == MVT::aarch64svcount ||
9647+
VA.getValVT().getVectorElementType() == MVT::i1;
9648+
MFI.setStackID(FI, IsPred ? TargetStackID::ScalablePredicateVector
9649+
: TargetStackID::ScalableVector);
9650+
}
96489651

96499652
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
96509653
SDValue Ptr = DAG.getFrameIndex(
@@ -29382,7 +29385,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
2938229385
// than doing it here in finalizeLowering.
2938329386
if (MFI.hasStackProtectorIndex()) {
2938429387
for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
29385-
if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
29388+
if (MFI.isScalableStackID(i) &&
2938629389
MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
2938729390
MFI.setStackID(MFI.getStackProtectorIndex(),
2938829391
TargetStackID::ScalableVector);

llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5592,7 +5592,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
55925592
assert(Subtarget.isSVEorStreamingSVEAvailable() &&
55935593
"Unexpected register store without SVE store instructions");
55945594
Opc = AArch64::STR_PXI;
5595-
StackID = TargetStackID::ScalableVector;
5595+
StackID = TargetStackID::ScalablePredicateVector;
55965596
}
55975597
break;
55985598
}
@@ -5607,7 +5607,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
56075607
Opc = AArch64::STRSui;
56085608
else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
56095609
Opc = AArch64::STR_PPXI;
5610-
StackID = TargetStackID::ScalableVector;
5610+
StackID = TargetStackID::ScalablePredicateVector;
56115611
}
56125612
break;
56135613
case 8:
@@ -5777,7 +5777,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
57775777
if (IsPNR)
57785778
PNRReg = DestReg;
57795779
Opc = AArch64::LDR_PXI;
5780-
StackID = TargetStackID::ScalableVector;
5780+
StackID = TargetStackID::ScalablePredicateVector;
57815781
}
57825782
break;
57835783
}
@@ -5792,7 +5792,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
57925792
Opc = AArch64::LDRSui;
57935793
else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
57945794
Opc = AArch64::LDR_PPXI;
5795-
StackID = TargetStackID::ScalableVector;
5795+
StackID = TargetStackID::ScalablePredicateVector;
57965796
}
57975797
break;
57985798
case 8:

llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1158,7 +1158,7 @@ void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
11581158
CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
11591159
for (const auto &Info : CSI) {
11601160
unsigned FrameIdx = Info.getFrameIdx();
1161-
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
1161+
if (MFI.isScalableStackID(FrameIdx))
11621162
continue;
11631163

11641164
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
@@ -1185,7 +1185,7 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
11851185
}
11861186

11871187
for (const auto &Info : CSI) {
1188-
if (MFI.getStackID(Info.getFrameIdx()) != TargetStackID::ScalableVector)
1188+
if (!MFI.isScalableStackID(Info.getFrameIdx()))
11891189
continue;
11901190

11911191
// Not all unwinders may know about SVE registers, so assume the lowest
@@ -1617,8 +1617,7 @@ void AArch64EpilogueEmitter::emitCalleeSavedRestores(
16171617
CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
16181618

16191619
for (const auto &Info : CSI) {
1620-
if (SVE !=
1621-
(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
1620+
if (SVE != MFI.isScalableStackID(Info.getFrameIdx()))
16221621
continue;
16231622

16241623
MCRegister Reg = Info.getReg();

0 commit comments

Comments
 (0)