@@ -324,7 +324,7 @@ AArch64FrameLowering::getArgumentStackToRestore(MachineFunction &MF,
 static bool produceCompactUnwindFrame(const AArch64FrameLowering &,
                                       MachineFunction &MF);
 
-// Conservatively, returns true if the function is likely to have an SVE vectors
+// Conservatively, returns true if the function is likely to have SVE vectors
 // on the stack. This function is safe to be called before callee-saves or
 // object offsets have been determined.
 static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
@@ -338,7 +338,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
 
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
-    if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FI))
       return true;
   }
 
@@ -1228,7 +1228,7 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
   const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
   bool FPAfterSVECalleeSaves =
       isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
-  if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+  if (MFI.isScalableStackID(FI)) {
     if (FPAfterSVECalleeSaves &&
         -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
       return StackOffset::getScalable(ObjectOffset);
@@ -1294,7 +1294,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
   const auto &MFI = MF.getFrameInfo();
   int64_t ObjectOffset = MFI.getObjectOffset(FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
-  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
+  bool isSVE = MFI.isScalableStackID(FI);
   return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                      PreferFP, ForSimm);
 }
@@ -2021,10 +2021,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
     }
     // Update the StackIDs of the SVE stack slots.
     MachineFrameInfo &MFI = MF.getFrameInfo();
-    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
+    if (RPI.Type == RegPairInfo::ZPR) {
       MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
       if (RPI.isPaired())
         MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
+    } else if (RPI.Type == RegPairInfo::PPR) {
+      MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector);
+      if (RPI.isPaired())
+        MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector);
     }
   }
   return true;
@@ -2232,8 +2236,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
     for (auto &MI : MBB) {
       std::optional<int> FI = getLdStFrameID(MI, MFI);
       if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-        if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-            AArch64InstrInfo::isFpOrNEON(MI))
+        if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
           FrameObjects[*FI] |= 2;
         else
           FrameObjects[*FI] |= 1;
@@ -2678,7 +2681,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
 #ifndef NDEBUG
   // First process all fixed stack objects.
   for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
-    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
+    assert(!MFI.isScalableStackID(I) &&
           "SVE vectors should never be passed on the stack by value, only by "
           "reference.");
 #endif
@@ -2712,12 +2715,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
   int StackProtectorFI = -1;
   if (MFI.hasStackProtectorIndex()) {
     StackProtectorFI = MFI.getStackProtectorIndex();
-    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(StackProtectorFI))
       ObjectsToAllocate.push_back(StackProtectorFI);
   }
   for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
-    unsigned StackID = MFI.getStackID(I);
-    if (StackID != TargetStackID::ScalableVector)
+    if (!MFI.isScalableStackID(I))
       continue;
     if (I == StackProtectorFI)
       continue;
@@ -3721,8 +3723,7 @@ void AArch64FrameLowering::orderFrameObjects(
       if (AFI.hasStackHazardSlotIndex()) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
         if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-              AArch64InstrInfo::isFpOrNEON(MI))
+          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
             FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
           else
             FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -4080,7 +4081,7 @@ void AArch64FrameLowering::emitRemarks(
         }
 
         unsigned RegTy = StackAccess::AccessType::GPR;
-        if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+        if (MFI.isScalableStackID(FrameIdx)) {
           // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
           // spill/fill the predicate as a data vector (so are an FPR access).
           if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
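
Note on the API swap above: the new MFI.isScalableStackID(FI) predicate replaces
direct comparisons against TargetStackID::ScalableVector, which is what lets the
PPR spill slots move to the new TargetStackID::ScalablePredicateVector stack ID
without breaking these call sites. A minimal sketch of the helper's assumed
shape, inferred from its usage in this diff rather than copied from the commit:

    // Assumed: a frame index counts as "scalable" if its slot holds either SVE
    // data (ZPR, ScalableVector) or predicate (PPR, ScalablePredicateVector)
    // state, matching the ZPR/PPR split made in spillCalleeSavedRegisters.
    bool MachineFrameInfo::isScalableStackID(int ObjectIdx) const {
      uint8_t StackID = getStackID(ObjectIdx);
      return StackID == TargetStackID::ScalableVector ||
             StackID == TargetStackID::ScalablePredicateVector;
    }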