17 changes: 17 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -2776,6 +2776,9 @@ def HasGetWaveIdInst : Predicate<"Subtarget->hasGetWaveIdInst()">,
def HasMAIInsts : Predicate<"Subtarget->hasMAIInsts()">,
AssemblerPredicate<(all_of FeatureMAIInsts)>;

def NotHasMAIInsts : Predicate<"!Subtarget->hasMAIInsts()">,
AssemblerPredicate<(all_of (not FeatureMAIInsts))>;

def HasSMemRealTime : Predicate<"Subtarget->hasSMemRealTime()">,
AssemblerPredicate<(all_of FeatureSMemRealTime)>;

@@ -2950,6 +2953,20 @@ def HasLdsBarrierArriveAtomic : Predicate<"Subtarget->hasLdsBarrierArriveAtomic(
def HasSetPrioIncWgInst : Predicate<"Subtarget->hasSetPrioIncWgInst()">,
AssemblerPredicate<(all_of FeatureSetPrioIncWgInst)>;

def NeedsAlignedVGPRs : Predicate<"Subtarget->needsAlignedVGPRs()">,
AssemblerPredicate<(all_of FeatureRequiresAlignedVGPRs)>;

//===----------------------------------------------------------------------===//
// HwModes
//===----------------------------------------------------------------------===//

// gfx90a-gfx950. Has AGPRs, and also the align2 VGPR/AGPR requirement
def AVAlign2LoadStoreMode : HwMode<[HasMAIInsts, NeedsAlignedVGPRs]>;

// gfx1250, has alignment requirement but no AGPRs.
def AlignedVGPRNoAGPRMode : HwMode<[NotHasMAIInsts, NeedsAlignedVGPRs]>;

Collaborator: What will be the Mode for gfx908 that has AGPRs but no strict VGPR align requirement?

Contributor (author): DefaultMode. That's mostly the reason for having separate AV and AV_LdSt cases.

Collaborator: Got it.
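
For reference, a minimal C++ sketch of how an MC-layer client would resolve an operand's register class under these modes. The getOpRegClassID and HwMode_RegInfo hooks are the ones used elsewhere in this patch; the helper name, opcode, and operand index are placeholders for illustration.

#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <cassert>
#include <cstdint>

// Illustrative helper: look up the register class of operand OpNo of Opcode
// under the subtarget's active register-info HwMode.
static const llvm::MCRegisterClass &
resolveOperandRegClass(const llvm::MCInstrInfo &MII,
                       const llvm::MCRegisterInfo &MRI,
                       const llvm::MCSubtargetInfo &STI,
                       unsigned Opcode, unsigned OpNo) {
  // gfx908 resolves through DefaultMode (AGPRs, no alignment requirement);
  // gfx90a-gfx950 select AVAlign2LoadStoreMode; gfx1250 selects
  // AlignedVGPRNoAGPRMode.
  unsigned HwMode = STI.getHwMode(llvm::MCSubtargetInfo::HwMode_RegInfo);
  const llvm::MCInstrDesc &Desc = MII.get(Opcode);
  int16_t RCID = MII.getOpRegClassID(Desc.operands()[OpNo], HwMode);
  assert(RCID != -1 && "operand has no register class");
  return MRI.getRegClass(RCID);
}

As far as the diff shows, the intent is that classes such as VReg_64_AlignTarget resolve to the unaligned variants under DefaultMode and to the _Align2 variants under the two aligned modes, so gfx908 keeps its current operand classes unchanged.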



// Include AMDGPU TD files
include "SISchedule.td"
include "GCNProcessors.td"
13 changes: 8 additions & 5 deletions llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -393,12 +393,13 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,

switch (N->getMachineOpcode()) {
default: {
const MCInstrDesc &Desc =
Subtarget->getInstrInfo()->get(N->getMachineOpcode());
const SIInstrInfo *TII = Subtarget->getInstrInfo();
const MCInstrDesc &Desc = TII->get(N->getMachineOpcode());
unsigned OpIdx = Desc.getNumDefs() + OpNo;
if (OpIdx >= Desc.getNumOperands())
return nullptr;
int RegClass = Desc.operands()[OpIdx].RegClass;

int16_t RegClass = TII->getOpRegClassID(Desc.operands()[OpIdx]);
if (RegClass == -1)
return nullptr;

@@ -4353,7 +4354,8 @@ bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
if (!RC || SIRI->isSGPRClass(RC))
return false;

if (RC != &AMDGPU::VS_32RegClass && RC != &AMDGPU::VS_64RegClass) {
if (RC != &AMDGPU::VS_32RegClass && RC != &AMDGPU::VS_64RegClass &&
RC != &AMDGPU::VS_64_Align2RegClass) {
AllUsesAcceptSReg = false;
SDNode *User = U->getUser();
if (User->isMachineOpcode()) {
@@ -4367,7 +4369,8 @@ bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
const TargetRegisterClass *CommutedRC =
getOperandRegClass(U->getUser(), CommutedOpNo);
if (CommutedRC == &AMDGPU::VS_32RegClass ||
CommutedRC == &AMDGPU::VS_64RegClass)
CommutedRC == &AMDGPU::VS_64RegClass ||
CommutedRC == &AMDGPU::VS_64_Align2RegClass)
AllUsesAcceptSReg = true;
}
}
30 changes: 19 additions & 11 deletions llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1347,6 +1347,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
bool ForcedDPP = false;
bool ForcedSDWA = false;
KernelScopeInfo KernelScope;
const unsigned HwMode;

/// @name Auto-generated Match Functions
/// {
@@ -1356,6 +1357,13 @@ class AMDGPUAsmParser : public MCTargetAsmParser {

/// }

/// Get size of register operand
unsigned getRegOperandSize(const MCInstrDesc &Desc, unsigned OpNo) const {
assert(OpNo < Desc.NumOperands);
int16_t RCID = MII.getOpRegClassID(Desc.operands()[OpNo], HwMode);
return getRegBitWidth(RCID) / 8;
}

private:
void createConstantSymbol(StringRef Id, int64_t Val);

@@ -1442,9 +1450,9 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;

AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
const MCInstrInfo &MII,
const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI, MII), Parser(_Parser),
HwMode(STI.getHwMode(MCSubtargetInfo::HwMode_RegInfo)) {
MCAsmParserExtension::Initialize(Parser);

setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));
@@ -4106,7 +4114,7 @@ bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, SMLoc IDLoc) {
if ((DMaskIdx == -1 || TFEIdx == -1) && isGFX10_AEncoding()) // intersect_ray
return true;

unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
unsigned VDataSize = getRegOperandSize(Desc, VDataIdx);
unsigned TFESize = (TFEIdx != -1 && Inst.getOperand(TFEIdx).getImm()) ? 1 : 0;
unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
if (DMask == 0)
@@ -4170,8 +4178,7 @@ bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, SMLoc IDLoc) {
const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
bool IsNSA = SrsrcIdx - VAddr0Idx > 1;
unsigned ActualAddrSize =
IsNSA ? SrsrcIdx - VAddr0Idx
: AMDGPU::getRegOperandSize(getMRI(), Desc, VAddr0Idx) / 4;
IsNSA ? SrsrcIdx - VAddr0Idx : getRegOperandSize(Desc, VAddr0Idx) / 4;

unsigned ExpectedAddrSize =
AMDGPU::getAddrSizeMIMGOp(BaseOpcode, DimInfo, IsA16, hasG16());
@@ -4181,8 +4188,7 @@ bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, SMLoc IDLoc) {
ExpectedAddrSize >
getNSAMaxSize(Desc.TSFlags & SIInstrFlags::VSAMPLE)) {
int VAddrLastIdx = SrsrcIdx - 1;
unsigned VAddrLastSize =
AMDGPU::getRegOperandSize(getMRI(), Desc, VAddrLastIdx) / 4;
unsigned VAddrLastSize = getRegOperandSize(Desc, VAddrLastIdx) / 4;

ActualAddrSize = VAddrLastIdx - VAddr0Idx + VAddrLastSize;
}
@@ -4428,7 +4434,8 @@ bool AMDGPUAsmParser::validateMFMA(const MCInst &Inst,
return true;

const MCRegisterInfo *TRI = getContext().getRegisterInfo();
if (TRI->getRegClass(Desc.operands()[0].RegClass).getSizeInBits() <= 128)
if (TRI->getRegClass(MII.getOpRegClassID(Desc.operands()[0], HwMode))
.getSizeInBits() <= 128)
return true;

if (TRI->regsOverlap(Src2Reg, DstReg)) {
@@ -4999,7 +5006,7 @@ bool AMDGPUAsmParser::validateDPP(const MCInst &Inst,
unsigned DppCtrl = Inst.getOperand(DppCtrlIdx).getImm();

if (!AMDGPU::isLegalDPALU_DPPControl(getSTI(), DppCtrl) &&
AMDGPU::isDPALU_DPP(MII.get(Opc), getSTI())) {
AMDGPU::isDPALU_DPP(MII.get(Opc), MII, getSTI())) {
// DP ALU DPP is supported for row_newbcast only on GFX9* and row_share
// only on GFX12.
SMLoc S = getImmLoc(AMDGPUOperand::ImmTyDppCtrl, Operands);
@@ -5522,7 +5529,8 @@ bool AMDGPUAsmParser::validateWMMA(const MCInst &Inst,
unsigned Fmt = Inst.getOperand(FmtIdx).getImm();
int SrcIdx = AMDGPU::getNamedOperandIdx(Opc, SrcOp);
unsigned RegSize =
TRI->getRegClass(Desc.operands()[SrcIdx].RegClass).getSizeInBits();
TRI->getRegClass(MII.getOpRegClassID(Desc.operands()[SrcIdx], HwMode))
.getSizeInBits();

if (RegSize == AMDGPU::wmmaScaleF8F6F4FormatToNumRegs(Fmt) * 32)
return true;
31 changes: 16 additions & 15 deletions llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -417,10 +417,10 @@ class getBUFVDataRegisterOperandForOp<RegisterOperand Op, bit isTFE> {
}

class getMUBUFInsDA<list<RegisterOperand> vdataList,
list<RegisterClass> vaddrList, bit isTFE, bit hasRestrictedSOffset> {
list<RegisterClassLike> vaddrList, bit isTFE, bit hasRestrictedSOffset> {
RegisterOperand vdataClass = !if(!empty(vdataList), ?, !head(vdataList));
RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
RegisterOperand vdata_op = getBUFVDataRegisterOperandForOp<vdataClass, isTFE>.ret;
RegisterClassLike vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
RegisterOperand vdata_op = getBUFVDataRegisterOperand<!cast<SIRegisterClassLike>(vdataClass.RegClass).Size, isTFE>.ret;

dag SOffset = !if(hasRestrictedSOffset, (ins SReg_32:$soffset), (ins SCSrc_b32:$soffset));
dag NonVaddrInputs = !con((ins SReg_128_XNULL:$srsrc), SOffset, (ins Offset:$offset, CPol_0:$cpol, i1imm_0:$swz));
@@ -453,8 +453,8 @@ class getMUBUFIns<int addrKind, list<RegisterOperand> vdataList, bit isTFE, bit
!if(!eq(addrKind, BUFAddrKind.Offset), getMUBUFInsDA<vdataList, [], isTFE, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.OffEn), getMUBUFInsDA<vdataList, [VGPR_32], isTFE, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.IdxEn), getMUBUFInsDA<vdataList, [VGPR_32], isTFE, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.BothEn), getMUBUFInsDA<vdataList, [VReg_64], isTFE, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.Addr64), getMUBUFInsDA<vdataList, [VReg_64], isTFE, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.BothEn), getMUBUFInsDA<vdataList, [VReg_64_AlignTarget], isTFE, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.Addr64), getMUBUFInsDA<vdataList, [VReg_64_AlignTarget], isTFE, hasRestrictedSOffset>.ret,
(ins))))));
}

@@ -677,8 +677,8 @@ class MUBUF_Pseudo_Store_Lds<string opName>
}

class getMUBUFAtomicInsDA<RegisterOperand vdata_op, bit vdata_in, bit hasRestrictedSOffset,
list<RegisterClass> vaddrList=[]> {
RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
list<RegisterClassLike> vaddrList=[]> {
RegisterClassLike vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));

dag VData = !if(vdata_in, (ins vdata_op:$vdata_in), (ins vdata_op:$vdata));
dag Data = !if(!empty(vaddrList), VData, !con(VData, (ins vaddrClass:$vaddr)));
@@ -702,9 +702,9 @@ class getMUBUFAtomicIns<int addrKind,
!if(!eq(addrKind, BUFAddrKind.IdxEn),
getMUBUFAtomicInsDA<vdataClass, vdata_in, hasRestrictedSOffset, [VGPR_32]>.ret,
!if(!eq(addrKind, BUFAddrKind.BothEn),
getMUBUFAtomicInsDA<vdataClass, vdata_in, hasRestrictedSOffset, [VReg_64]>.ret,
getMUBUFAtomicInsDA<vdataClass, vdata_in, hasRestrictedSOffset, [VReg_64_AlignTarget]>.ret,
!if(!eq(addrKind, BUFAddrKind.Addr64),
getMUBUFAtomicInsDA<vdataClass, vdata_in, hasRestrictedSOffset, [VReg_64]>.ret,
getMUBUFAtomicInsDA<vdataClass, vdata_in, hasRestrictedSOffset, [VReg_64_AlignTarget]>.ret,
(ins))))));
}

@@ -1568,11 +1568,12 @@ multiclass BufferAtomicCmpSwapPat_Common<ValueType vt, ValueType data_vt, string
# !if(!eq(RtnMode, "ret"), "", "_noret")
# "_" # vt);
defvar InstSuffix = !if(!eq(RtnMode, "ret"), "_RTN", "");
defvar data_vt_RC = getVregSrcForVT<data_vt>.ret.RegClass;
defvar data_op = getVregSrcForVT<data_vt>.ret;
defvar data_vt_RC = getVregClassForVT<data_vt>.ret;

let AddedComplexity = !if(!eq(RtnMode, "ret"), 0, 1) in {
defvar OffsetResDag = (!cast<MUBUF_Pseudo>(Inst # "_OFFSET" # InstSuffix)
data_vt_RC:$vdata_in, SReg_128:$srsrc, SCSrc_b32:$soffset,
data_op:$vdata_in, SReg_128:$srsrc, SCSrc_b32:$soffset,
Offset:$offset);
def : GCNPat<
(vt (Op (MUBUFOffset v4i32:$srsrc, i32:$soffset, i32:$offset), data_vt:$vdata_in)),
@@ -1583,7 +1584,7 @@ multiclass BufferAtomicCmpSwapPat_Common<ValueType vt, ValueType data_vt, string
>;

defvar Addr64ResDag = (!cast<MUBUF_Pseudo>(Inst # "_ADDR64" # InstSuffix)
data_vt_RC:$vdata_in, VReg_64:$vaddr, SReg_128:$srsrc,
data_op:$vdata_in, VReg_64:$vaddr, SReg_128:$srsrc,
SCSrc_b32:$soffset, Offset:$offset);
def : GCNPat<
(vt (Op (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, i32:$offset),
@@ -1832,7 +1833,7 @@ multiclass SIBufferAtomicCmpSwapPat_Common<ValueType vt, ValueType data_vt, stri
(extract_cpol_set_glc $auxiliary),
(extract_cpol $auxiliary));
defvar SrcRC = getVregSrcForVT<vt>.ret;
defvar DataRC = getVregSrcForVT<data_vt>.ret.RegClass;
defvar DataRC = getVregClassForVT<data_vt>.ret;
defvar SubLo = !if(!eq(vt, i32), sub0, sub0_sub1);
defvar SubHi = !if(!eq(vt, i32), sub1, sub2_sub3);

@@ -2088,7 +2089,7 @@ defm : MUBUFStore_PatternOffset <"BUFFER_STORE_SHORT", i16, store_global>;

multiclass MUBUFScratchStorePat_Common <string Instr,
ValueType vt, PatFrag st,
RegisterClass rc = VGPR_32> {
RegisterClassLike rc = VGPR_32> {
def : GCNPat <
(st vt:$value, (MUBUFScratchOffen v4i32:$srsrc, i32:$vaddr,
i32:$soffset, i32:$offset)),
@@ -2104,7 +2105,7 @@ multiclass MUBUFScratchStorePat_Common <string Instr,

multiclass MUBUFScratchStorePat <string Instr,
ValueType vt, PatFrag st,
RegisterClass rc = VGPR_32> {
RegisterClassLike rc = VGPR_32> {
let SubtargetPredicate = HasUnrestrictedSOffset in {
defm : MUBUFScratchStorePat_Common<Instr, vt, st, rc>;
}
4 changes: 2 additions & 2 deletions llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -904,7 +904,7 @@ let SubtargetPredicate = isGFX1250Plus in {
let WaveSizePredicate = isWave32, mayStore = 0 in {
let OtherPredicates = [HasTransposeLoadF4F6Insts] in {
defm DS_LOAD_TR4_B64 : DS_1A_RET_NoM0<"ds_load_tr4_b64", VGPROp_64>;
defm DS_LOAD_TR6_B96 : DS_1A_RET_NoM0<"ds_load_tr6_b96", VGPROp_96>;
defm DS_LOAD_TR6_B96 : DS_1A_RET_NoM0<"ds_load_tr6_b96", VGPROp_96_Align1>;
} // End OtherPredicates = [HasTransposeLoadF4F6Insts]
defm DS_LOAD_TR8_B64 : DS_1A_RET_NoM0<"ds_load_tr8_b64", VGPROp_64>;
defm DS_LOAD_TR16_B128 : DS_1A_RET_NoM0<"ds_load_tr16_b128", VGPROp_128>;
@@ -934,7 +934,7 @@ let WaveSizePredicate = isWave64, SubtargetPredicate = HasGFX950Insts, mayStore
defm DS_READ_B64_TR_B4 : DS_1A_RET_NoM0<"ds_read_b64_tr_b4", AVLdSt_64>;
defm DS_READ_B64_TR_B8 : DS_1A_RET_NoM0<"ds_read_b64_tr_b8", AVLdSt_64>;
defm DS_READ_B64_TR_B16 : DS_1A_RET_NoM0<"ds_read_b64_tr_b16", AVLdSt_64>;
defm DS_READ_B96_TR_B6 : DS_1A_RET_NoM0<"ds_read_b96_tr_b6", AVLdSt_96>;
defm DS_READ_B96_TR_B6 : DS_1A_RET_NoM0<"ds_read_b96_tr_b6", AVLdSt_96_Align1>;
}

//===----------------------------------------------------------------------===//
16 changes: 11 additions & 5 deletions llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -57,7 +57,9 @@ static int64_t getInlineImmVal64(unsigned Imm);
AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
MCContext &Ctx, MCInstrInfo const *MCII)
: MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
MAI(*Ctx.getAsmInfo()),
HwModeRegClass(STI.getHwMode(MCSubtargetInfo::HwMode_RegInfo)),
TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
// ToDo: AMDGPUDisassembler supports only VI ISA.
if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
@@ -825,7 +827,8 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}

if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
if (Desc.TSFlags & SIInstrFlags::MIMG) {
int VAddr0Idx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
int RsrcIdx =
@@ -838,7 +841,7 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
for (unsigned i = 0; i < NSAArgs; ++i) {
const unsigned VAddrIdx = VAddr0Idx + 1 + i;
auto VAddrRCID =
MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
MCII->getOpRegClassID(Desc.operands()[VAddrIdx], HwModeRegClass);
MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
}
Bytes = Bytes.slice(4 * NSAWords);
@@ -1311,7 +1314,8 @@ void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
// Widen the register to the correct number of enabled channels.
MCRegister NewVdata;
if (DstSize != Info->VDataDwords) {
auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
auto DataRCID = MCII->getOpRegClassID(
MCII->get(NewOpcode).operands()[VDataIdx], HwModeRegClass);

// Get first subregister of VData
MCRegister Vdata0 = MI.getOperand(VDataIdx).getReg();
@@ -1338,7 +1342,9 @@ void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;

auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
auto AddrRCID = MCII->getOpRegClassID(
MCII->get(NewOpcode).operands()[VAddrSAIdx], HwModeRegClass);

const MCRegisterClass &NewRC = MRI.getRegClass(AddrRCID);
NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0, &NewRC);
NewVAddrSA = CheckVGPROverflow(NewVAddrSA, NewRC, MRI);
1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -41,6 +41,7 @@ class AMDGPUDisassembler : public MCDisassembler {
std::unique_ptr<MCInstrInfo const> const MCII;
const MCRegisterInfo &MRI;
const MCAsmInfo &MAI;
const unsigned HwModeRegClass;
const unsigned TargetMaxInstBytes;
mutable ArrayRef<uint8_t> Bytes;
mutable uint32_t Literal;