
Commit e673651

kosarev and mahesh-attarde authored and committed
[AMDGPU][AsmParser] Introduce MC representation for lit() and lit64(). (llvm#160316)
Rework the lit64() support to use it. The rules for when to add lit64() could be simplified and improved; in this change, however, we just follow the existing conventions on the assembler and disassembler sides. In codegen we do not (and normally should not need to) add explicit lit() and lit64() modifiers, so the codegen tests lose them. Otherwise the change is an NFCI, and it simplifies printing of operands.
1 parent 5005e5f · commit e673651

20 files changed (+304 −165 lines)
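In essence, the patch lets the parser carry an explicit 64-bit literal as an MCExpr built by AMDGPUMCExpr::createLit() instead of a bare immediate. A minimal sketch of that pattern, condensed from the addLiteralImmOperand() hunks below; the standalone helper and the include paths are illustrative assumptions, while createLit(), LitModifier and Lo_32() are used as they appear in the diff:

// Sketch only: condenses the pattern applied in AMDGPUOperand::addLiteralImmOperand().
#include "MCTargetDesc/AMDGPUMCExpr.h" // AMDGPUMCExpr, LitModifier (assumed header)
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/MathExtras.h" // Lo_32()
using namespace llvm;

// Hypothetical helper, not part of the patch.
static void addLiteral64(MCInst &Inst, int64_t Val, bool CanUse64BitLiterals,
                         MCContext &Ctx) {
  if (CanUse64BitLiterals && Lo_32(Val) != 0) {
    // Wrap the value in a lit64() expression so later stages (printer,
    // encoder, validators) can see that a 64-bit literal is intended.
    Inst.addOperand(MCOperand::createExpr(
        AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
  } else {
    // Otherwise keep the plain immediate operand form.
    Inst.addOperand(MCOperand::createImm(Val));
  }
}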

llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp

Lines changed: 70 additions & 19 deletions
@@ -55,8 +55,6 @@ class AMDGPUAsmParser;
 
 enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };
 
-enum class LitModifier { None, Lit, Lit64 };
-
 //===----------------------------------------------------------------------===//
 // Operand
 //===----------------------------------------------------------------------===//
@@ -1591,10 +1589,14 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
     return static_cast<AMDGPUTargetStreamer &>(TS);
   }
 
-  const MCRegisterInfo *getMRI() const {
+  MCContext &getContext() const {
     // We need this const_cast because for some reason getContext() is not const
     // in MCAsmParser.
-    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
+    return const_cast<AMDGPUAsmParser *>(this)->MCTargetAsmParser::getContext();
+  }
+
+  const MCRegisterInfo *getMRI() const {
+    return getContext().getRegisterInfo();
   }
 
   const MCInstrInfo *getMII() const {
@@ -2313,6 +2315,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
   APInt Literal(64, Val);
   uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
 
+  bool CanUse64BitLiterals =
+      AsmParser->has64BitLiterals() &&
+      !(InstDesc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
+  MCContext &Ctx = AsmParser->getContext();
+
   if (Imm.IsFPImm) { // We got fp literal token
     switch (OpTy) {
     case AMDGPU::OPERAND_REG_IMM_INT64:
@@ -2342,7 +2349,15 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
         Val &= 0xffffffff00000000u;
       }
 
-      Inst.addOperand(MCOperand::createImm(Val));
+      if ((OpTy == AMDGPU::OPERAND_REG_IMM_FP64 ||
+           OpTy == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+           OpTy == AMDGPU::OPERAND_REG_INLINE_AC_FP64) &&
+          CanUse64BitLiterals && Lo_32(Val) != 0) {
+        Inst.addOperand(MCOperand::createExpr(
+            AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+      } else {
+        Inst.addOperand(MCOperand::createImm(Val));
+      }
       return;
     }
 
@@ -2352,7 +2367,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
      llvm_unreachable("fp literal in 64-bit integer instruction.");
 
     case AMDGPU::OPERAND_KIMM64:
-      Inst.addOperand(MCOperand::createImm(Val));
+      if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+        Inst.addOperand(MCOperand::createExpr(
+            AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+      } else {
+        Inst.addOperand(MCOperand::createImm(Val));
+      }
       return;
 
     case AMDGPU::OPERAND_REG_IMM_BF16:
@@ -2442,7 +2462,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
         getModifiers().Lit == LitModifier::Lit)
       Val = Lo_32(Val);
 
-    Inst.addOperand(MCOperand::createImm(Val));
+    if (CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val))) {
+      Inst.addOperand(MCOperand::createExpr(
+          AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+    } else {
+      Inst.addOperand(MCOperand::createImm(Val));
+    }
     return;
 
   case AMDGPU::OPERAND_REG_IMM_FP64:
@@ -2469,7 +2494,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
       Val = static_cast<uint64_t>(Val) << 32;
     }
 
-    Inst.addOperand(MCOperand::createImm(Val));
+    if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+      Inst.addOperand(MCOperand::createExpr(
+          AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+    } else {
+      Inst.addOperand(MCOperand::createImm(Val));
+    }
     return;
 
   case AMDGPU::OPERAND_REG_IMM_INT16:
@@ -2491,7 +2521,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
         getModifiers().Lit != LitModifier::Lit64)
       Val <<= 32;
 
-    Inst.addOperand(MCOperand::createImm(Val));
+    if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+      Inst.addOperand(MCOperand::createExpr(
+          AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+    } else {
+      Inst.addOperand(MCOperand::createImm(Val));
+    }
     return;
 
   default:
@@ -3640,7 +3675,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
 
   const MCOperand &MO = Inst.getOperand(OpIdx);
 
-  int64_t Val = MO.getImm();
+  int64_t Val = MO.isImm() ? MO.getImm() : getLitValue(MO.getExpr());
   auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
 
   switch (OpSize) { // expected operand size
@@ -4768,16 +4803,26 @@ bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst,
     const MCOperand &MO = Inst.getOperand(OpIdx);
     // Exclude special imm operands (like that used by s_set_gpr_idx_on)
    if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
-      if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
+      std::optional<int64_t> Imm;
+      if (MO.isImm()) {
+        Imm = MO.getImm();
+      } else if (MO.isExpr()) {
+        if (isLitExpr(MO.getExpr()))
+          Imm = getLitValue(MO.getExpr());
+      } else {
+        continue;
+      }
+
+      if (!Imm.has_value()) {
+        ++NumExprs;
+      } else if (!isInlineConstant(Inst, OpIdx)) {
        auto OpType = static_cast<AMDGPU::OperandType>(
            Desc.operands()[OpIdx].OperandType);
-        int64_t Value = encode32BitLiteral(MO.getImm(), OpType);
+        int64_t Value = encode32BitLiteral(*Imm, OpType);
        if (NumLiterals == 0 || LiteralValue != Value) {
          LiteralValue = Value;
          ++NumLiterals;
        }
-      } else if (MO.isExpr()) {
-        ++NumExprs;
      }
    }
  }
@@ -5010,9 +5055,18 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
    if (!isSISrcOperand(Desc, OpIdx))
      continue;
 
+    std::optional<int64_t> Imm;
+    if (MO.isImm())
+      Imm = MO.getImm();
+    else if (MO.isExpr() && isLitExpr(MO.getExpr()))
+      Imm = getLitValue(MO.getExpr());
+
    bool IsAnotherLiteral = false;
-    if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
-      uint64_t Value = static_cast<uint64_t>(MO.getImm());
+    if (!Imm.has_value()) {
+      // Literal value not known, so we conservately assume it's different.
+      IsAnotherLiteral = true;
+    } else if (!isInlineConstant(Inst, OpIdx)) {
+      uint64_t Value = *Imm;
      bool IsForcedFP64 =
          Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_KIMM64 ||
          (Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_REG_IMM_FP64 &&
@@ -5033,9 +5087,6 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
 
      IsAnotherLiteral = !LiteralValue || *LiteralValue != Value;
      LiteralValue = Value;
-    } else if (MO.isExpr()) {
-      // Literal value not known, so we conservately assume it's different.
-      IsAnotherLiteral = true;
    }
 
    if (IsAnotherLiteral && !HasMandatoryLiteral &&
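Because a source operand can now be either a plain immediate or a lit()/lit64() expression, the validation hunks above read the literal value back through isLitExpr()/getLitValue() before comparing or counting literals. A condensed sketch of that read-back, assuming only the call shapes visible in this diff; the standalone helper is hypothetical and the declaring header for isLitExpr()/getLitValue() is not shown here:

// Sketch only: mirrors how validateSOPLiteral()/validateVOPLiteral() above
// recover a literal value regardless of its operand representation.
#include "llvm/MC/MCInst.h"
#include <cstdint>
#include <optional>
using namespace llvm;

// isLitExpr()/getLitValue() are used exactly as in the diff; their
// declarations live in the AMDGPU MC headers (not shown in this hunk).
static std::optional<int64_t> getKnownLiteral(const MCOperand &MO) {
  if (MO.isImm())
    return MO.getImm();
  if (MO.isExpr() && isLitExpr(MO.getExpr()))
    return getLitValue(MO.getExpr());
  // Any other expression has no known value at parse time, so callers
  // conservatively treat it as a distinct literal.
  return std::nullopt;
}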
