Skip to content

Commit 9f5abd3

Browse files
[Codegen] Add a separate stack ID for scalable predicates (llvm#142390)
This splits out "ScalablePredicateVector" from the "ScalableVector" StackID this is primarily to allow easy differentiation between vectors and predicates (without inspecting instructions). This new stack ID is not used in many places yet, but will be used in a later patch to mark stack slots that are known to contain predicates. Co-authored-by: Kerry McLaughlin <[email protected]>
1 parent c4e1bca commit 9f5abd3

17 files changed

+71
-54
lines changed

llvm/include/llvm/CodeGen/MIRYamlMapping.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -378,6 +378,8 @@ struct ScalarEnumerationTraits<TargetStackID::Value> {
378378
IO.enumCase(ID, "default", TargetStackID::Default);
379379
IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
380380
IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
381+
IO.enumCase(ID, "scalable-predicate-vector",
382+
TargetStackID::ScalablePredicateVector);
381383
IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
382384
IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
383385
}

llvm/include/llvm/CodeGen/MachineFrameInfo.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -497,7 +497,14 @@ class MachineFrameInfo {
497497
/// Should this stack ID be considered in MaxAlignment.
498498
bool contributesToMaxAlignment(uint8_t StackID) {
499499
return StackID == TargetStackID::Default ||
500-
StackID == TargetStackID::ScalableVector;
500+
StackID == TargetStackID::ScalableVector ||
501+
StackID == TargetStackID::ScalablePredicateVector;
502+
}
503+
504+
bool isScalableStackID(int ObjectIdx) const {
505+
uint8_t StackID = getStackID(ObjectIdx);
506+
return StackID == TargetStackID::ScalableVector ||
507+
StackID == TargetStackID::ScalablePredicateVector;
501508
}
502509

503510
/// setObjectAlignment - Change the alignment of the specified stack object.

llvm/include/llvm/CodeGen/TargetFrameLowering.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ enum Value {
3232
SGPRSpill = 1,
3333
ScalableVector = 2,
3434
WasmLocal = 3,
35+
ScalablePredicateVector = 4,
3536
NoAlloc = 255
3637
};
3738
}

llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis {
7272
: Slot(Idx), Size(MFI.getObjectSize(Idx)),
7373
Align(MFI.getObjectAlign(Idx).value()), Offset(Offset),
7474
SlotTy(Invalid), Scalable(false) {
75-
Scalable = MFI.getStackID(Idx) == TargetStackID::ScalableVector;
75+
Scalable = MFI.isScalableStackID(Idx);
7676
if (MFI.isSpillSlotObjectIndex(Idx))
7777
SlotTy = SlotType::Spill;
7878
else if (MFI.isFixedObjectIndex(Idx))

llvm/lib/Target/AArch64/AArch64FrameLowering.cpp

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ AArch64FrameLowering::getArgumentStackToRestore(MachineFunction &MF,
324324
static bool produceCompactUnwindFrame(const AArch64FrameLowering &,
325325
MachineFunction &MF);
326326

327-
// Conservatively, returns true if the function is likely to have an SVE vectors
327+
// Conservatively, returns true if the function is likely to have SVE vectors
328328
// on the stack. This function is safe to be called before callee-saves or
329329
// object offsets have been determined.
330330
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
@@ -338,7 +338,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
338338

339339
const MachineFrameInfo &MFI = MF.getFrameInfo();
340340
for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
341-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
341+
if (MFI.isScalableStackID(FI))
342342
return true;
343343
}
344344

@@ -1228,7 +1228,7 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
12281228
const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
12291229
bool FPAfterSVECalleeSaves =
12301230
isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
1231-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
1231+
if (MFI.isScalableStackID(FI)) {
12321232
if (FPAfterSVECalleeSaves &&
12331233
-ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
12341234
return StackOffset::getScalable(ObjectOffset);
@@ -1294,7 +1294,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
12941294
const auto &MFI = MF.getFrameInfo();
12951295
int64_t ObjectOffset = MFI.getObjectOffset(FI);
12961296
bool isFixed = MFI.isFixedObjectIndex(FI);
1297-
bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
1297+
bool isSVE = MFI.isScalableStackID(FI);
12981298
return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
12991299
PreferFP, ForSimm);
13001300
}
@@ -2021,10 +2021,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
20212021
}
20222022
// Update the StackIDs of the SVE stack slots.
20232023
MachineFrameInfo &MFI = MF.getFrameInfo();
2024-
if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
2024+
if (RPI.Type == RegPairInfo::ZPR) {
20252025
MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
20262026
if (RPI.isPaired())
20272027
MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
2028+
} else if (RPI.Type == RegPairInfo::PPR) {
2029+
MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector);
2030+
if (RPI.isPaired())
2031+
MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector);
20282032
}
20292033
}
20302034
return true;
@@ -2232,8 +2236,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
22322236
for (auto &MI : MBB) {
22332237
std::optional<int> FI = getLdStFrameID(MI, MFI);
22342238
if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
2235-
if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
2236-
AArch64InstrInfo::isFpOrNEON(MI))
2239+
if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
22372240
FrameObjects[*FI] |= 2;
22382241
else
22392242
FrameObjects[*FI] |= 1;
@@ -2678,7 +2681,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
26782681
#ifndef NDEBUG
26792682
// First process all fixed stack objects.
26802683
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
2681-
assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
2684+
assert(!MFI.isScalableStackID(I) &&
26822685
"SVE vectors should never be passed on the stack by value, only by "
26832686
"reference.");
26842687
#endif
@@ -2712,12 +2715,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
27122715
int StackProtectorFI = -1;
27132716
if (MFI.hasStackProtectorIndex()) {
27142717
StackProtectorFI = MFI.getStackProtectorIndex();
2715-
if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
2718+
if (MFI.isScalableStackID(StackProtectorFI))
27162719
ObjectsToAllocate.push_back(StackProtectorFI);
27172720
}
27182721
for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
2719-
unsigned StackID = MFI.getStackID(I);
2720-
if (StackID != TargetStackID::ScalableVector)
2722+
if (!MFI.isScalableStackID(I))
27212723
continue;
27222724
if (I == StackProtectorFI)
27232725
continue;
@@ -3721,8 +3723,7 @@ void AArch64FrameLowering::orderFrameObjects(
37213723
if (AFI.hasStackHazardSlotIndex()) {
37223724
std::optional<int> FI = getLdStFrameID(MI, MFI);
37233725
if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
3724-
if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
3725-
AArch64InstrInfo::isFpOrNEON(MI))
3726+
if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
37263727
FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
37273728
else
37283729
FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -4080,7 +4081,7 @@ void AArch64FrameLowering::emitRemarks(
40804081
}
40814082

40824083
unsigned RegTy = StackAccess::AccessType::GPR;
4083-
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
4084+
if (MFI.isScalableStackID(FrameIdx)) {
40844085
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
40854086
// spill/fill the predicate as a data vector (so are an FPR access).
40864087
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&

llvm/lib/Target/AArch64/AArch64FrameLowering.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,7 @@ class AArch64FrameLowering : public TargetFrameLowering {
124124
return false;
125125
case TargetStackID::Default:
126126
case TargetStackID::ScalableVector:
127+
case TargetStackID::ScalablePredicateVector:
127128
case TargetStackID::NoAlloc:
128129
return true;
129130
}
@@ -132,7 +133,8 @@ class AArch64FrameLowering : public TargetFrameLowering {
132133
bool isStackIdSafeForLocalArea(unsigned StackId) const override {
133134
// We don't support putting SVE objects into the pre-allocated local
134135
// frame block at the moment.
135-
return StackId != TargetStackID::ScalableVector;
136+
return (StackId != TargetStackID::ScalableVector &&
137+
StackId != TargetStackID::ScalablePredicateVector);
136138
}
137139

138140
void

llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7497,7 +7497,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
74977497
int FI = cast<FrameIndexSDNode>(N)->getIndex();
74987498
// We can only encode VL scaled offsets, so only fold in frame indexes
74997499
// referencing SVE objects.
7500-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
7500+
if (MFI.isScalableStackID(FI)) {
75017501
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
75027502
OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
75037503
return true;
@@ -7543,7 +7543,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
75437543
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
75447544
// We can only encode VL scaled offsets, so only fold in frame indexes
75457545
// referencing SVE objects.
7546-
if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
7546+
if (MFI.isScalableStackID(FI))
75477547
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
75487548
}
75497549

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9256,8 +9256,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
92569256
(MI.getOpcode() == AArch64::ADDXri ||
92579257
MI.getOpcode() == AArch64::SUBXri)) {
92589258
const MachineOperand &MO = MI.getOperand(1);
9259-
if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) ==
9260-
TargetStackID::ScalableVector)
9259+
if (MO.isFI() && MF.getFrameInfo().isScalableStackID(MO.getIndex()))
92619260
MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false,
92629261
/*IsImplicit=*/true));
92639262
}
@@ -9704,8 +9703,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
97049703
Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
97059704
MachineFrameInfo &MFI = MF.getFrameInfo();
97069705
int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
9707-
if (isScalable)
9708-
MFI.setStackID(FI, TargetStackID::ScalableVector);
9706+
if (isScalable) {
9707+
bool IsPred = VA.getValVT() == MVT::aarch64svcount ||
9708+
VA.getValVT().getVectorElementType() == MVT::i1;
9709+
MFI.setStackID(FI, IsPred ? TargetStackID::ScalablePredicateVector
9710+
: TargetStackID::ScalableVector);
9711+
}
97099712

97109713
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
97119714
SDValue Ptr = DAG.getFrameIndex(
@@ -29605,7 +29608,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
2960529608
// than doing it here in finalizeLowering.
2960629609
if (MFI.hasStackProtectorIndex()) {
2960729610
for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
29608-
if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
29611+
if (MFI.isScalableStackID(i) &&
2960929612
MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
2961029613
MFI.setStackID(MFI.getStackProtectorIndex(),
2961129614
TargetStackID::ScalableVector);

llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5599,7 +5599,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
55995599
assert(Subtarget.isSVEorStreamingSVEAvailable() &&
56005600
"Unexpected register store without SVE store instructions");
56015601
Opc = AArch64::STR_PXI;
5602-
StackID = TargetStackID::ScalableVector;
5602+
StackID = TargetStackID::ScalablePredicateVector;
56035603
}
56045604
break;
56055605
}
@@ -5614,7 +5614,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
56145614
Opc = AArch64::STRSui;
56155615
else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
56165616
Opc = AArch64::STR_PPXI;
5617-
StackID = TargetStackID::ScalableVector;
5617+
StackID = TargetStackID::ScalablePredicateVector;
56185618
}
56195619
break;
56205620
case 8:
@@ -5784,7 +5784,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
57845784
if (IsPNR)
57855785
PNRReg = DestReg;
57865786
Opc = AArch64::LDR_PXI;
5787-
StackID = TargetStackID::ScalableVector;
5787+
StackID = TargetStackID::ScalablePredicateVector;
57885788
}
57895789
break;
57905790
}
@@ -5799,7 +5799,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
57995799
Opc = AArch64::LDRSui;
58005800
else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
58015801
Opc = AArch64::LDR_PPXI;
5802-
StackID = TargetStackID::ScalableVector;
5802+
StackID = TargetStackID::ScalablePredicateVector;
58035803
}
58045804
break;
58055805
case 8:

llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1165,7 +1165,7 @@ void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
11651165
CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
11661166
for (const auto &Info : CSI) {
11671167
unsigned FrameIdx = Info.getFrameIdx();
1168-
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
1168+
if (MFI.isScalableStackID(FrameIdx))
11691169
continue;
11701170

11711171
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
@@ -1192,7 +1192,7 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
11921192
}
11931193

11941194
for (const auto &Info : CSI) {
1195-
if (MFI.getStackID(Info.getFrameIdx()) != TargetStackID::ScalableVector)
1195+
if (!MFI.isScalableStackID(Info.getFrameIdx()))
11961196
continue;
11971197

11981198
// Not all unwinders may know about SVE registers, so assume the lowest
@@ -1624,8 +1624,7 @@ void AArch64EpilogueEmitter::emitCalleeSavedRestores(
16241624
CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
16251625

16261626
for (const auto &Info : CSI) {
1627-
if (SVE !=
1628-
(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
1627+
if (SVE != MFI.isScalableStackID(Info.getFrameIdx()))
16291628
continue;
16301629

16311630
MCRegister Reg = Info.getReg();

0 commit comments

Comments (0)