@@ -612,7 +612,7 @@ void AArch64FrameLowering::emitCalleeSavedGPRLocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
   for (const auto &Info : CSI) {
     unsigned FrameIdx = Info.getFrameIdx();
-    if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FrameIdx))
       continue;
 
     assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
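Note: the hunks in this commit repeatedly replace the explicit comparison `MFI.getStackID(FI) == TargetStackID::ScalableVector` with a call to `MFI.isScalableStackID(FI)`. The helper's definition is not part of this diff; the standalone sketch below only illustrates the assumed intent (a single predicate covering both the existing ScalableVector ID and the new ScalablePredVector ID introduced later in this commit), using hypothetical stand-ins for the LLVM types:

```cpp
#include <cassert>

// Hypothetical stand-in for TargetStackID (names follow this diff only).
enum class StackID { Default, ScalableVector, ScalablePredVector };

// Assumed behaviour of MachineFrameInfo::isScalableStackID: true for any
// frame object whose size scales with the SVE vector length, i.e. both
// ZPR (data vector) and PPR (predicate) spill slots.
static bool isScalableStackID(StackID ID) {
  return ID == StackID::ScalableVector || ID == StackID::ScalablePredVector;
}

int main() {
  assert(isScalableStackID(StackID::ScalableVector));
  assert(isScalableStackID(StackID::ScalablePredVector));
  assert(!isScalableStackID(StackID::Default));
}
```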
@@ -645,7 +645,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
 
   for (const auto &Info : CSI) {
-    if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (!MFI.isScalableStackID(Info.getFrameIdx()))
       continue;
 
     // Not all unwinders may know about SVE registers, so assume the lowest
@@ -712,8 +712,7 @@ static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
 
   for (const auto &Info : CSI) {
-    if (SVE !=
-        (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (SVE != MFI.isScalableStackID(Info.getFrameIdx()))
       continue;
 
     MCRegister Reg = Info.getReg();
@@ -2749,7 +2748,7 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
   const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
   bool FPAfterSVECalleeSaves =
       isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
-  if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+  if (MFI.isScalableStackID(FI)) {
     if (FPAfterSVECalleeSaves &&
         -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
       return StackOffset::getScalable(ObjectOffset);
@@ -2815,7 +2814,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
   const auto &MFI = MF.getFrameInfo();
   int64_t ObjectOffset = MFI.getObjectOffset(FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
-  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
+  bool isSVE = MFI.isScalableStackID(FI);
   return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                      PreferFP, ForSimm);
 }
@@ -3551,10 +3550,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
     }
     // Update the StackIDs of the SVE stack slots.
     MachineFrameInfo &MFI = MF.getFrameInfo();
-    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
+    if (RPI.Type == RegPairInfo::ZPR) {
       MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
       if (RPI.isPaired())
         MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
+    } else if (RPI.Type == RegPairInfo::PPR) {
+      MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredVector);
+      if (RPI.isPaired())
+        MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredVector);
     }
 
     if (X0Scratch != AArch64::NoRegister)
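This hunk stops tagging PPR spill slots with the generic ScalableVector stack ID and gives them the new ScalablePredVector ID instead, so predicate and data-vector saves can be treated as distinct scalable areas. A minimal standalone sketch of the assumed mapping, with hypothetical enums standing in for RegPairInfo and TargetStackID (the real types live in the AArch64 backend):

```cpp
#include <cassert>

// Hypothetical models of the backend types referenced by the hunk above.
enum class PairType { GPR, FPR, ZPR, PPR };
enum class StackID { Default, ScalableVector, ScalablePredVector };

// Assumed mapping implied by the change: ZPR slots keep the existing
// ScalableVector ID, PPR slots now get their own ScalablePredVector ID,
// and everything else stays on the default stack.
static StackID stackIDForSpill(PairType T) {
  switch (T) {
  case PairType::ZPR:
    return StackID::ScalableVector;
  case PairType::PPR:
    return StackID::ScalablePredVector;
  default:
    return StackID::Default;
  }
}

int main() {
  assert(stackIDForSpill(PairType::ZPR) == StackID::ScalableVector);
  assert(stackIDForSpill(PairType::PPR) == StackID::ScalablePredVector);
  assert(stackIDForSpill(PairType::GPR) == StackID::Default);
}
```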
@@ -3769,8 +3772,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
     for (auto &MI : MBB) {
       std::optional<int> FI = getLdStFrameID(MI, MFI);
       if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-        if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-            AArch64InstrInfo::isFpOrNEON(MI))
+        if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
           FrameObjects[*FI] |= 2;
         else
           FrameObjects[*FI] |= 1;
@@ -4232,7 +4234,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
 #ifndef NDEBUG
   // First process all fixed stack objects.
   for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
-    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
+    assert(!MFI.isScalableStackID(I) &&
            "SVE vectors should never be passed on the stack by value, only by "
            "reference.");
 #endif
@@ -4266,12 +4268,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
   int StackProtectorFI = -1;
   if (MFI.hasStackProtectorIndex()) {
     StackProtectorFI = MFI.getStackProtectorIndex();
-    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(StackProtectorFI))
       ObjectsToAllocate.push_back(StackProtectorFI);
   }
   for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
-    unsigned StackID = MFI.getStackID(I);
-    if (StackID != TargetStackID::ScalableVector)
+    if (!MFI.isScalableStackID(I))
       continue;
     if (I == StackProtectorFI)
       continue;
@@ -5286,8 +5287,7 @@ void AArch64FrameLowering::orderFrameObjects(
       if (AFI.hasStackHazardSlotIndex()) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
         if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-              AArch64InstrInfo::isFpOrNEON(MI))
+          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
             FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
           else
             FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -5645,7 +5645,7 @@ void AArch64FrameLowering::emitRemarks(
     }
 
     unsigned RegTy = StackAccess::AccessType::GPR;
-    if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+    if (MFI.isScalableStackID(FrameIdx)) {
       // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
       // spill/fill the predicate as a data vector (so are an FPR access).
       if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&