@@ -606,7 +606,7 @@ void AArch64FrameLowering::emitCalleeSavedGPRLocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
   for (const auto &Info : CSI) {
     unsigned FrameIdx = Info.getFrameIdx();
-    if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FrameIdx))
       continue;

     assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
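
This and the following hunks fold the repeated `MFI.getStackID(FI) == TargetStackID::ScalableVector` comparisons into a single `MFI.isScalableStackID(FI)` query. A minimal sketch of what such a helper plausibly looks like, assuming it accepts both the existing `ScalableVector` ID and the `ScalablePredVector` ID introduced further down in this diff (the actual implementation in `MachineFrameInfo` may differ):

```cpp
// Sketch only: true for any stack slot whose size scales with the SVE
// vector length, whether it holds data vectors (ZPRs) or predicates (PPRs).
bool MachineFrameInfo::isScalableStackID(int ObjectIdx) const {
  uint8_t StackID = getStackID(ObjectIdx);
  return StackID == TargetStackID::ScalableVector ||
         StackID == TargetStackID::ScalablePredVector;
}
```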
@@ -639,7 +639,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);

   for (const auto &Info : CSI) {
-    if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (!MFI.isScalableStackID(Info.getFrameIdx()))
       continue;

     // Not all unwinders may know about SVE registers, so assume the lowest
@@ -706,8 +706,7 @@ static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);

   for (const auto &Info : CSI) {
-    if (SVE !=
-        (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (SVE != MFI.isScalableStackID(Info.getFrameIdx()))
       continue;

     MCRegister Reg = Info.getReg();
@@ -2587,10 +2586,9 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
     return StackOffset::getFixed(ObjectOffset - getOffsetOfLocalArea());

   const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
-  if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+  if (MFI.isScalableStackID(FI))
     return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()),
                             ObjectOffset);
-  }

   bool IsFixed = MFI.isFixedObjectIndex(FI);
   bool IsCSR =
@@ -2646,7 +2644,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
   const auto &MFI = MF.getFrameInfo();
   int64_t ObjectOffset = MFI.getObjectOffset(FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
-  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
+  bool isSVE = MFI.isScalableStackID(FI);
   return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                      PreferFP, ForSimm);
 }
@@ -3348,10 +3346,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
     }
     // Update the StackIDs of the SVE stack slots.
     MachineFrameInfo &MFI = MF.getFrameInfo();
-    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
+    if (RPI.Type == RegPairInfo::ZPR) {
       MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
       if (RPI.isPaired())
         MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
+    } else if (RPI.Type == RegPairInfo::PPR) {
+      MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredVector);
+      if (RPI.isPaired())
+        MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredVector);
     }

     if (X0Scratch != AArch64::NoRegister)
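
This hunk is where the two scalable stack IDs diverge: ZPR spill slots keep `TargetStackID::ScalableVector`, while PPR spill slots are now tagged `TargetStackID::ScalablePredVector`, letting later layout code treat predicate spills separately from data-vector spills. A hedged sketch of the enum extension this relies on, with the new enumerator's position and value chosen for illustration only:

```cpp
// Illustrative sketch; Default, SGPRSpill, ScalableVector, WasmLocal and
// NoAlloc are the pre-existing TargetStackID values.
namespace TargetStackID {
enum Value {
  Default = 0,
  SGPRSpill = 1,
  ScalableVector = 2,
  WasmLocal = 3,
  ScalablePredVector = 4, // new: SVE predicate (PPR) spill slots
  NoAlloc = 255
};
} // namespace TargetStackID
```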
@@ -3565,8 +3567,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
     for (auto &MI : MBB) {
       std::optional<int> FI = getLdStFrameID(MI, MFI);
       if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-        if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-            AArch64InstrInfo::isFpOrNEON(MI))
+        if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
           FrameObjects[*FI] |= 2;
         else
           FrameObjects[*FI] |= 1;
@@ -4029,7 +4030,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
 #ifndef NDEBUG
   // First process all fixed stack objects.
   for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
-    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
+    assert(!MFI.isScalableStackID(I) &&
            "SVE vectors should never be passed on the stack by value, only by "
            "reference.");
 #endif
@@ -4063,12 +4064,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
   int StackProtectorFI = -1;
   if (MFI.hasStackProtectorIndex()) {
     StackProtectorFI = MFI.getStackProtectorIndex();
-    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(StackProtectorFI))
       ObjectsToAllocate.push_back(StackProtectorFI);
   }
   for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
-    unsigned StackID = MFI.getStackID(I);
-    if (StackID != TargetStackID::ScalableVector)
+    if (!MFI.isScalableStackID(I))
       continue;
     if (I == StackProtectorFI)
       continue;
@@ -5083,8 +5083,7 @@ void AArch64FrameLowering::orderFrameObjects(
       if (AFI.hasStackHazardSlotIndex()) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
         if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-              AArch64InstrInfo::isFpOrNEON(MI))
+          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
             FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
           else
             FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -5442,7 +5441,7 @@ void AArch64FrameLowering::emitRemarks(
     }

     unsigned RegTy = StackAccess::AccessType::GPR;
-    if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+    if (MFI.isScalableStackID(FrameIdx)) {
       // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
       // spill/fill the predicate as a data vector (so are an FPR access).
       if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&