Skip to content

Commit 8bb477a

Browse files
committed
use isScalar and isFixedVector
1 parent 7c00299 commit 8bb477a

11 files changed: +124 additions, −144 deletions

llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6816,7 +6816,7 @@ bool CombinerHelper::tryFoldSelectOfConstants(GSelect *Select,
68166816
LLT TrueTy = MRI.getType(Select->getTrueReg());
68176817

68186818
// We only do this combine for scalar boolean conditions.
6819-
if (CondTy != LLT::scalar(1))
6819+
if (!CondTy.isScalar(1))
68206820
return false;
68216821

68226822
if (TrueTy.isPointer())

llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp

Lines changed: 19 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -2099,7 +2099,7 @@ LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
20992099
const unsigned Offset = (I - 1) * PartSize;
21002100

21012101
Register SrcReg = MI.getOperand(I).getReg();
2102-
assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
2102+
assert(MRI.getType(SrcReg).isScalar(PartSize));
21032103

21042104
auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
21052105

@@ -6596,7 +6596,7 @@ LegalizerHelper::narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx,
65966596
// If all finite floats fit into the narrowed integer type, we can just swap
65976597
// out the result type. This is practically only useful for conversions from
65986598
// half to at least 16-bits, so just handle the one case.
6599-
if (SrcTy.getScalarType() != LLT::scalar(16) ||
6599+
if (!SrcTy.getScalarType().isScalar(16) ||
66006600
NarrowTy.getScalarSizeInBits() < (IsSigned ? 17u : 16u))
66016601
return UnableToLegalize;
66026602

@@ -7471,7 +7471,7 @@ LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
74717471
const LLT S32 = LLT::scalar(32);
74727472
const LLT S1 = LLT::scalar(1);
74737473

7474-
assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);
7474+
assert(MRI.getType(Src).isScalar(64) && MRI.getType(Dst).isScalar(32));
74757475

74767476
// unsigned cul2f(ulong u) {
74777477
// uint lz = clz(u);
@@ -7529,7 +7529,7 @@ LegalizerHelper::lowerU64ToF32WithSITOFP(MachineInstr &MI) {
75297529
const LLT S32 = LLT::scalar(32);
75307530
const LLT S1 = LLT::scalar(1);
75317531

7532-
assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);
7532+
assert(MRI.getType(Src).isScalar(64) && MRI.getType(Dst).isScalar(32));
75337533

75347534
// For i64 < INT_MAX we simply reuse SITOFP.
75357535
// Otherwise, divide i64 by 2, round result by ORing with the lowest bit
@@ -7563,7 +7563,7 @@ LegalizerHelper::lowerU64ToF64BitFloatOps(MachineInstr &MI) {
75637563
const LLT S64 = LLT::scalar(64);
75647564
const LLT S32 = LLT::scalar(32);
75657565

7566-
assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);
7566+
assert(MRI.getType(Src).isScalar(64) && MRI.getType(Dst).isScalar(64));
75677567

75687568
// We create double value from 32 bit parts with 32 exponent difference.
75697569
// Note that + and - are float operations that adjust the implicit leading
@@ -7595,25 +7595,25 @@ LegalizerHelper::lowerU64ToF64BitFloatOps(MachineInstr &MI) {
75957595
LegalizerHelper::LegalizeResult LegalizerHelper::lowerUITOFP(MachineInstr &MI) {
75967596
auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
75977597

7598-
if (SrcTy == LLT::scalar(1)) {
7598+
if (SrcTy.isScalar(1)) {
75997599
auto True = MIRBuilder.buildFConstant(DstTy, 1.0);
76007600
auto False = MIRBuilder.buildFConstant(DstTy, 0.0);
76017601
MIRBuilder.buildSelect(Dst, Src, True, False);
76027602
MI.eraseFromParent();
76037603
return Legalized;
76047604
}
76057605

7606-
if (SrcTy != LLT::scalar(64))
7606+
if (!SrcTy.isScalar(64))
76077607
return UnableToLegalize;
76087608

7609-
if (DstTy == LLT::scalar(32))
7609+
if (DstTy.isScalar(32))
76107610
// TODO: SelectionDAG has several alternative expansions to port which may
76117611
// be more reasonable depending on the available instructions. We also need
76127612
// a more advanced mechanism to choose an optimal version depending on
76137613
// target features such as sitofp or CTLZ availability.
76147614
return lowerU64ToF32WithSITOFP(MI);
76157615

7616-
if (DstTy == LLT::scalar(64))
7616+
if (DstTy.isScalar(64))
76177617
return lowerU64ToF64BitFloatOps(MI);
76187618

76197619
return UnableToLegalize;
@@ -7626,18 +7626,18 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSITOFP(MachineInstr &MI) {
76267626
const LLT S32 = LLT::scalar(32);
76277627
const LLT S1 = LLT::scalar(1);
76287628

7629-
if (SrcTy == S1) {
7629+
if (SrcTy.isScalar(1)) {
76307630
auto True = MIRBuilder.buildFConstant(DstTy, -1.0);
76317631
auto False = MIRBuilder.buildFConstant(DstTy, 0.0);
76327632
MIRBuilder.buildSelect(Dst, Src, True, False);
76337633
MI.eraseFromParent();
76347634
return Legalized;
76357635
}
76367636

7637-
if (SrcTy != S64)
7637+
if (!SrcTy.isScalar(64))
76387638
return UnableToLegalize;
76397639

7640-
if (DstTy == S32) {
7640+
if (DstTy.isScalar(32)) {
76417641
// signed cl2f(long l) {
76427642
// long s = l >> 63;
76437643
// float r = cul2f((l + s) ^ s);
@@ -7664,12 +7664,10 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSITOFP(MachineInstr &MI) {
76647664

76657665
LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOUI(MachineInstr &MI) {
76667666
auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7667-
const LLT S64 = LLT::scalar(64);
7668-
const LLT S32 = LLT::scalar(32);
76697667

7670-
if (SrcTy != S64 && SrcTy != S32)
7668+
if (!SrcTy.isScalar(64) && !SrcTy.isScalar(32))
76717669
return UnableToLegalize;
7672-
if (DstTy != S32 && DstTy != S64)
7670+
if (!DstTy.isScalar(32) && !DstTy.isScalar(64))
76737671
return UnableToLegalize;
76747672

76757673
// FPTOSI gives same result as FPTOUI for positive signed integers.
@@ -7704,11 +7702,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOUI(MachineInstr &MI) {
77047702

77057703
LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOSI(MachineInstr &MI) {
77067704
auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7707-
const LLT S64 = LLT::scalar(64);
7708-
const LLT S32 = LLT::scalar(32);
77097705

77107706
// FIXME: Only f32 to i64 conversions are supported.
7711-
if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64)
7707+
if (!SrcTy.getScalarType().isScalar(32) || !DstTy.getScalarType().isScalar(64))
77127708
return UnableToLegalize;
77137709

77147710
// Expand f32 -> i64 conversion
@@ -7873,8 +7869,8 @@ LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) {
78737869
const LLT S32 = LLT::scalar(32);
78747870

78757871
auto [Dst, Src] = MI.getFirst2Regs();
7876-
assert(MRI.getType(Dst).getScalarType() == LLT::scalar(16) &&
7877-
MRI.getType(Src).getScalarType() == LLT::scalar(64));
7872+
assert(MRI.getType(Dst).getScalarType().isScalar(16) &&
7873+
MRI.getType(Src).getScalarType().isScalar(64));
78787874

78797875
if (MRI.getType(Src).isVector()) // TODO: Handle vectors directly.
78807876
return UnableToLegalize;
@@ -7985,10 +7981,8 @@ LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) {
79857981
LegalizerHelper::LegalizeResult
79867982
LegalizerHelper::lowerFPTRUNC(MachineInstr &MI) {
79877983
auto [DstTy, SrcTy] = MI.getFirst2LLTs();
7988-
const LLT S64 = LLT::scalar(64);
7989-
const LLT S16 = LLT::scalar(16);
79907984

7991-
if (DstTy.getScalarType() == S16 && SrcTy.getScalarType() == S64)
7985+
if (DstTy.getScalarType().isScalar(16) && SrcTy.getScalarType().isScalar(64))
79927986
return lowerFPTRUNC_F64_TO_F16(MI);
79937987

79947988
return UnableToLegalize;
@@ -9263,7 +9257,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSelect(MachineInstr &MI) {
92639257

92649258
// The condition was potentially zero extended before, but we want a sign
92659259
// extended boolean.
9266-
if (MaskTy != LLT::scalar(1))
9260+
if (!MaskTy.isScalar(1))
92679261
MaskElt = MIRBuilder.buildSExtInReg(MaskTy, MaskElt, 1).getReg(0);
92689262

92699263
// Continue the sign extension (or truncate) to match the data type.

llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -72,7 +72,7 @@ struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
7272
if (TRI->isSGPRReg(MRI, PhysReg)) {
7373
LLT Ty = MRI.getType(ExtReg);
7474
LLT S32 = LLT::scalar(32);
75-
if (Ty != S32) {
75+
if (!Ty.isScalar(32)) {
7676
// FIXME: We should probably support readfirstlane intrinsics with all
7777
// legal 32-bit types.
7878
assert(Ty.getSizeInBits() == 32);

llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -409,7 +409,7 @@ static bool isFPExtFromF16OrConst(const MachineRegisterInfo &MRI,
409409
const MachineInstr *Def = MRI.getVRegDef(Reg);
410410
if (Def->getOpcode() == TargetOpcode::G_FPEXT) {
411411
Register SrcReg = Def->getOperand(1).getReg();
412-
return MRI.getType(SrcReg) == LLT::scalar(16);
412+
return MRI.getType(SrcReg).isScalar(16);
413413
}
414414

415415
if (Def->getOpcode() == TargetOpcode::G_FCONSTANT) {
@@ -428,7 +428,7 @@ bool AMDGPUCombinerHelper::matchExpandPromotedF16FMed3(MachineInstr &MI,
428428
Register Src2) const {
429429
assert(MI.getOpcode() == TargetOpcode::G_FPTRUNC);
430430
Register SrcReg = MI.getOperand(1).getReg();
431-
if (!MRI.hasOneNonDBGUse(SrcReg) || MRI.getType(SrcReg) != LLT::scalar(32))
431+
if (!MRI.hasOneNonDBGUse(SrcReg) || !MRI.getType(SrcReg).isScalar(32))
432432
return false;
433433

434434
return isFPExtFromF16OrConst(MRI, Src0) && isFPExtFromF16OrConst(MRI, Src1) &&

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -87,7 +87,7 @@ DivergenceLoweringHelper::DivergenceLoweringHelper(
8787

8888
// _(s1) -> SReg_32/64(s1)
8989
void DivergenceLoweringHelper::markAsLaneMask(Register DstReg) const {
90-
assert(MRI->getType(DstReg) == LLT::scalar(1));
90+
assert(MRI->getType(DstReg).isScalar(1));
9191

9292
if (MRI->getRegClassOrNull(DstReg)) {
9393
if (MRI->constrainRegClass(DstReg, ST->getBoolRC()))
@@ -100,13 +100,11 @@ void DivergenceLoweringHelper::markAsLaneMask(Register DstReg) const {
100100

101101
void DivergenceLoweringHelper::getCandidatesForLowering(
102102
SmallVectorImpl<MachineInstr *> &Vreg1Phis) const {
103-
LLT S1 = LLT::scalar(1);
104-
105103
// Add divergent i1 phis to the list
106104
for (MachineBasicBlock &MBB : *MF) {
107105
for (MachineInstr &MI : MBB.phis()) {
108106
Register Dst = MI.getOperand(0).getReg();
109-
if (MRI->getType(Dst) == S1 && MUI->isDivergent(Dst))
107+
if (MRI->getType(Dst).isScalar(1) && MUI->isDivergent(Dst))
110108
Vreg1Phis.push_back(&MI);
111109
}
112110
}

llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp

Lines changed: 20 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -105,7 +105,7 @@ bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
105105
MachineOperand &Src = MI.getOperand(1);
106106

107107
// TODO: This should be legalized to s32 if needed
108-
if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
108+
if (MRI->getType(Dst.getReg()).isScalar(1))
109109
return false;
110110

111111
const TargetRegisterClass *DstRC
@@ -293,7 +293,7 @@ bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
293293
// - divergent S1 G_PHI should go through lane mask merging algorithm
294294
// and be fully inst-selected in AMDGPUGlobalISelDivergenceLowering
295295
// - uniform S1 G_PHI should be lowered into S32 G_PHI in AMDGPURegBankSelect
296-
if (DefTy == LLT::scalar(1))
296+
if (DefTy.isScalar(1))
297297
return false;
298298

299299
// TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
@@ -733,9 +733,9 @@ bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
733733
// Selection logic below is for V2S16 only.
734734
// For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32.
735735
Register Dst = MI.getOperand(0).getReg();
736-
if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) ||
736+
if (!MRI->getType(Dst).isFixedVector(2, 16) ||
737737
(MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC &&
738-
SrcTy != LLT::scalar(32)))
738+
!SrcTy.isScalar(32)))
739739
return selectImpl(MI, *CoverageInfo);
740740

741741
const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
@@ -1073,9 +1073,9 @@ bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
10731073

10741074
LLT Ty = MRI->getType(Dst0);
10751075
unsigned Opc;
1076-
if (Ty == LLT::scalar(32))
1076+
if (Ty.isScalar(32))
10771077
Opc = AMDGPU::V_DIV_SCALE_F32_e64;
1078-
else if (Ty == LLT::scalar(64))
1078+
else if (Ty.isScalar(64))
10791079
Opc = AMDGPU::V_DIV_SCALE_F64_e64;
10801080
else
10811081
return false;
@@ -2387,11 +2387,10 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
23872387
Register SrcReg = I.getOperand(1).getReg();
23882388
const LLT DstTy = MRI->getType(DstReg);
23892389
const LLT SrcTy = MRI->getType(SrcReg);
2390-
const LLT S1 = LLT::scalar(1);
23912390

23922391
const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
23932392
const RegisterBank *DstRB;
2394-
if (DstTy == S1) {
2393+
if (DstTy.isScalar(1)) {
23952394
// This is a special case. We don't treat s1 for legalization artifacts as
23962395
// vcc booleans.
23972396
DstRB = SrcRB;
@@ -2429,7 +2428,7 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
24292428
return true;
24302429
}
24312430

2432-
if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
2431+
if (DstTy.isFixedVector(2, 16) && SrcTy.isFixedVector(2, 32)) {
24332432
MachineBasicBlock *MBB = I.getParent();
24342433
const DebugLoc &DL = I.getDebugLoc();
24352434

@@ -2721,8 +2720,7 @@ static bool isExtractHiElt(MachineRegisterInfo &MRI, Register In,
27212720
if (Shuffle->getOpcode() != AMDGPU::G_SHUFFLE_VECTOR)
27222721
return false;
27232722

2724-
assert(MRI.getType(Shuffle->getOperand(0).getReg()) ==
2725-
LLT::fixed_vector(2, 16));
2723+
assert(MRI.getType(Shuffle->getOperand(0).getReg()).isFixedVector(2, 16));
27262724

27272725
ArrayRef<int> Mask = Shuffle->getOperand(3).getShuffleMask();
27282726
assert(Mask.size() == 2);
@@ -2746,8 +2744,8 @@ bool AMDGPUInstructionSelector::selectG_FPEXT(MachineInstr &I) const {
27462744

27472745
Register Src = I.getOperand(1).getReg();
27482746

2749-
if (MRI->getType(Dst) == LLT::scalar(32) &&
2750-
MRI->getType(Src) == LLT::scalar(16)) {
2747+
if (MRI->getType(Dst).isScalar(32) &&
2748+
MRI->getType(Src).isScalar(16)) {
27512749
if (isExtractHiElt(*MRI, Src, Src)) {
27522750
MachineBasicBlock *BB = I.getParent();
27532751
BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_CVT_HI_F32_F16), Dst)
@@ -2775,7 +2773,7 @@ bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
27752773
Register Dst = MI.getOperand(0).getReg();
27762774
const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
27772775
if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2778-
MRI->getType(Dst) != LLT::scalar(64))
2776+
!MRI->getType(Dst).isScalar(64))
27792777
return false;
27802778

27812779
Register Src = MI.getOperand(1).getReg();
@@ -2821,7 +2819,7 @@ bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
28212819
Register Dst = MI.getOperand(0).getReg();
28222820
const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
28232821
if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2824-
MRI->getType(Dst) != LLT::scalar(64))
2822+
!MRI->getType(Dst).isScalar(64))
28252823
return false;
28262824

28272825
Register Src = MI.getOperand(1).getReg();
@@ -2993,7 +2991,7 @@ bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
29932991
// RegBankSelect knows what it's doing if the branch condition is scc, even
29942992
// though it currently does not.
29952993
if (!isVCC(CondReg, *MRI)) {
2996-
if (MRI->getType(CondReg) != LLT::scalar(32))
2994+
if (!MRI->getType(CondReg).isScalar(32))
29972995
return false;
29982996

29992997
CondPhysReg = AMDGPU::SCC;
@@ -3456,15 +3454,15 @@ bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
34563454
static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
34573455
Register ZExtSrc;
34583456
if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3459-
return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3457+
return MRI.getType(ZExtSrc).isScalar(32) ? ZExtSrc : Register();
34603458

34613459
// Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
34623460
const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
34633461
if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
34643462
return Register();
34653463

34663464
assert(Def->getNumOperands() == 3 &&
3467-
MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64));
3465+
MRI.getType(Def->getOperand(0).getReg()).isScalar(64));
34683466
if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
34693467
return Def->getOperand(1).getReg();
34703468
}
@@ -4054,7 +4052,7 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
40544052
// This is a workaround. For extension from type i1, `selectImpl()` uses
40554053
// patterns from TD file and generates an illegal VGPR to SGPR COPY as type
40564054
// i1 can only be hold in a SGPR class.
4057-
if (MRI->getType(I.getOperand(1).getReg()) != LLT::scalar(1) &&
4055+
if (!MRI->getType(I.getOperand(1).getReg()).isScalar(1) &&
40584056
selectImpl(I, *CoverageInfo))
40594057
return true;
40604058
return selectG_SZA_EXT(I);
@@ -4287,7 +4285,7 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(
42874285
if (MI->getOpcode() == AMDGPU::G_FNEG &&
42884286
// It's possible to see an f32 fneg here, but unlikely.
42894287
// TODO: Treat f32 fneg as only high bit.
4290-
MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
4288+
MRI.getType(Src).isFixedVector(2, 16)) {
42914289
Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
42924290
Src = MI->getOperand(1).getReg();
42934291
MI = MRI.getVRegDef(Src);
@@ -5785,7 +5783,7 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
57855783
if (!EncodedOffset)
57865784
return std::nullopt;
57875785

5788-
assert(MRI->getType(SOffset) == LLT::scalar(32));
5786+
assert(MRI->getType(SOffset).isScalar(32));
57895787
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
57905788
[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}};
57915789
}
@@ -5800,7 +5798,7 @@ AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
58005798
std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
58015799

58025800
if (mi_match(Src, *MRI, m_GFPExt(m_Reg(Src)))) {
5803-
assert(MRI->getType(Src) == LLT::scalar(16));
5801+
assert(MRI->getType(Src).isScalar(16));
58045802

58055803
// Only change Src if src modifier could be gained. In such cases new Src
58065804
// could be sgpr but this does not violate constant bus restriction for

0 commit comments

Comments (0)