@@ -55,8 +55,6 @@ class AMDGPUAsmParser;
 
 enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };
 
-enum class LitModifier { None, Lit, Lit64 };
-
 //===----------------------------------------------------------------------===//
 // Operand
 //===----------------------------------------------------------------------===//
@@ -1591,10 +1589,14 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
     return static_cast<AMDGPUTargetStreamer &>(TS);
   }
 
-  const MCRegisterInfo *getMRI() const {
+  MCContext &getContext() const {
     // We need this const_cast because for some reason getContext() is not const
     // in MCAsmParser.
-    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
+    return const_cast<AMDGPUAsmParser *>(this)->MCTargetAsmParser::getContext();
+  }
+
+  const MCRegisterInfo *getMRI() const {
+    return getContext().getRegisterInfo();
   }
 
   const MCInstrInfo *getMII() const {
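The hunk above introduces a const getContext() wrapper so that const members such as getMRI() no longer repeat the const_cast. A minimal self-contained sketch of that idiom, with placeholder types standing in for MCAsmParser/MCTargetAsmParser/AMDGPUAsmParser (assumptions, not the real MC API):

// Illustrative stand-ins only; not part of the patch.
struct BaseParser {
  // Non-const accessor, like MCAsmParser::getContext().
  int &context() { return Ctx; }
  int Ctx = 0;
};

struct DerivedParser : BaseParser {
  // Const wrapper: safe because the base accessor does not mutate state;
  // const member functions can now call context() directly.
  int &context() const {
    return const_cast<DerivedParser *>(this)->BaseParser::context();
  }
};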
@@ -2313,6 +2315,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
   APInt Literal(64, Val);
   uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
 
+  bool CanUse64BitLiterals =
+      AsmParser->has64BitLiterals() &&
+      !(InstDesc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
+  MCContext &Ctx = AsmParser->getContext();
+
   if (Imm.IsFPImm) { // We got fp literal token
     switch (OpTy) {
     case AMDGPU::OPERAND_REG_IMM_INT64:
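The two lines added here gate all of the later lit64 cases: a full 64-bit literal is emitted only when the target reports 64-bit literal support and the instruction is not VOP3/VOP3P, and (per the hunks below) only when the value actually needs its low 32 bits. A standalone sketch of that decision, using plain C++ stand-ins rather than the real MC types:

// Simplified sketch, not part of the patch. Lo_32 mirrors llvm::Lo_32; the
// enum is an illustrative stand-in for "create an imm vs. a lit64 expression".
#include <cstdint>
#include <cstdio>

static uint32_t Lo_32(uint64_t V) { return static_cast<uint32_t>(V); }

enum class LitKind { Imm32Path, Lit64Expr };

// A 64-bit FP/KIMM64 literal is wrapped in a lit64 expression only when the
// target has 64-bit literals, the encoding is not VOP3/VOP3P, and the value
// actually uses its low 32 bits (otherwise the legacy encoding still works).
static LitKind selectLiteralKind(bool Has64BitLiterals, bool IsVOP3orVOP3P,
                                 uint64_t Val) {
  bool CanUse64BitLiterals = Has64BitLiterals && !IsVOP3orVOP3P;
  if (CanUse64BitLiterals && Lo_32(Val) != 0)
    return LitKind::Lit64Expr;
  return LitKind::Imm32Path;
}

int main() {
  // 0x3FF0000000000000 (1.0 as f64) has a zero low half: plain imm path.
  std::printf("%d\n", static_cast<int>(selectLiteralKind(true, false, 0x3FF0000000000000ull)));
  // 0x400921FB54442D18 (pi as f64) needs all 64 bits: lit64 expression.
  std::printf("%d\n", static_cast<int>(selectLiteralKind(true, false, 0x400921FB54442D18ull)));
}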
@@ -2342,7 +2349,15 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
         Val &= 0xffffffff00000000u;
       }
 
-      Inst.addOperand(MCOperand::createImm(Val));
+      if ((OpTy == AMDGPU::OPERAND_REG_IMM_FP64 ||
+           OpTy == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+           OpTy == AMDGPU::OPERAND_REG_INLINE_AC_FP64) &&
+          CanUse64BitLiterals && Lo_32(Val) != 0) {
+        Inst.addOperand(MCOperand::createExpr(
+            AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+      } else {
+        Inst.addOperand(MCOperand::createImm(Val));
+      }
       return;
     }
 
@@ -2352,7 +2367,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
       llvm_unreachable("fp literal in 64-bit integer instruction.");
 
     case AMDGPU::OPERAND_KIMM64:
-      Inst.addOperand(MCOperand::createImm(Val));
+      if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+        Inst.addOperand(MCOperand::createExpr(
+            AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+      } else {
+        Inst.addOperand(MCOperand::createImm(Val));
+      }
       return;
 
     case AMDGPU::OPERAND_REG_IMM_BF16:
@@ -2442,7 +2462,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
         getModifiers().Lit == LitModifier::Lit)
       Val = Lo_32(Val);
 
-    Inst.addOperand(MCOperand::createImm(Val));
+    if (CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val))) {
+      Inst.addOperand(MCOperand::createExpr(
+          AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+    } else {
+      Inst.addOperand(MCOperand::createImm(Val));
+    }
     return;
 
   case AMDGPU::OPERAND_REG_IMM_FP64:
@@ -2469,7 +2494,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
       Val = static_cast<uint64_t>(Val) << 32;
     }
 
-    Inst.addOperand(MCOperand::createImm(Val));
+    if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+      Inst.addOperand(MCOperand::createExpr(
+          AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+    } else {
+      Inst.addOperand(MCOperand::createImm(Val));
+    }
     return;
 
   case AMDGPU::OPERAND_REG_IMM_INT16:
@@ -2491,7 +2521,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
         getModifiers().Lit != LitModifier::Lit64)
       Val <<= 32;
 
-    Inst.addOperand(MCOperand::createImm(Val));
+    if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+      Inst.addOperand(MCOperand::createExpr(
+          AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+    } else {
+      Inst.addOperand(MCOperand::createImm(Val));
+    }
     return;
 
   default:
@@ -3640,7 +3675,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
 
   const MCOperand &MO = Inst.getOperand(OpIdx);
 
-  int64_t Val = MO.getImm();
+  int64_t Val = MO.isImm() ? MO.getImm() : getLitValue(MO.getExpr());
   auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
 
   switch (OpSize) { // expected operand size
@@ -4768,16 +4803,26 @@ bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst,
     const MCOperand &MO = Inst.getOperand(OpIdx);
     // Exclude special imm operands (like that used by s_set_gpr_idx_on)
     if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
-      if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
+      std::optional<int64_t> Imm;
+      if (MO.isImm()) {
+        Imm = MO.getImm();
+      } else if (MO.isExpr()) {
+        if (isLitExpr(MO.getExpr()))
+          Imm = getLitValue(MO.getExpr());
+      } else {
+        continue;
+      }
+
+      if (!Imm.has_value()) {
+        ++NumExprs;
+      } else if (!isInlineConstant(Inst, OpIdx)) {
         auto OpType = static_cast<AMDGPU::OperandType>(
             Desc.operands()[OpIdx].OperandType);
-        int64_t Value = encode32BitLiteral(MO.getImm(), OpType);
+        int64_t Value = encode32BitLiteral(*Imm, OpType);
         if (NumLiterals == 0 || LiteralValue != Value) {
           LiteralValue = Value;
           ++NumLiterals;
         }
-      } else if (MO.isExpr()) {
-        ++NumExprs;
       }
     }
   }
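Because a literal operand can now be an MCExpr (the lit/lit64 wrapper) instead of a plain immediate, the validator first tries to recover a known value and only counts the operand as an opaque expression when it cannot. A simplified, self-contained sketch of that extraction, with LitExpr/OtherExpr as assumed stand-ins for the real expression classes and the isLitExpr/getLitValue helpers:

// Sketch under stated assumptions, not the patch itself.
#include <cstdint>
#include <optional>
#include <variant>

struct LitExpr { int64_t Value; }; // stand-in for a lit/lit64 AMDGPUMCExpr
struct OtherExpr {};               // stand-in for any other MCExpr
using Operand = std::variant<int64_t, LitExpr, OtherExpr>;

// Return the literal value when it is statically known, std::nullopt
// otherwise (the caller then counts the operand as an expression).
static std::optional<int64_t> getKnownLiteral(const Operand &MO) {
  if (const int64_t *Imm = std::get_if<int64_t>(&MO))
    return *Imm;
  if (const LitExpr *Lit = std::get_if<LitExpr>(&MO))
    return Lit->Value;
  return std::nullopt;
}

int main() {
  Operand A = int64_t{42};              // plain immediate
  Operand B = LitExpr{0x123456789ALL};  // lit64 expression with a known value
  Operand C = OtherExpr{};              // value unknown at parse time
  return getKnownLiteral(A).has_value() && getKnownLiteral(B).has_value() &&
                 !getKnownLiteral(C).has_value()
             ? 0
             : 1;
}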
@@ -5010,9 +5055,18 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
     if (!isSISrcOperand(Desc, OpIdx))
       continue;
 
+    std::optional<int64_t> Imm;
+    if (MO.isImm())
+      Imm = MO.getImm();
+    else if (MO.isExpr() && isLitExpr(MO.getExpr()))
+      Imm = getLitValue(MO.getExpr());
+
     bool IsAnotherLiteral = false;
-    if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
-      uint64_t Value = static_cast<uint64_t>(MO.getImm());
+    if (!Imm.has_value()) {
+      // Literal value not known, so we conservatively assume it's different.
+      IsAnotherLiteral = true;
+    } else if (!isInlineConstant(Inst, OpIdx)) {
+      uint64_t Value = *Imm;
       bool IsForcedFP64 =
           Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_KIMM64 ||
           (Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_REG_IMM_FP64 &&
@@ -5033,9 +5087,6 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
 
       IsAnotherLiteral = !LiteralValue || *LiteralValue != Value;
       LiteralValue = Value;
-    } else if (MO.isExpr()) {
-      // Literal value not known, so we conservatively assume it's different.
-      IsAnotherLiteral = true;
     }
 
     if (IsAnotherLiteral && !HasMandatoryLiteral &&
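The same pattern drives the duplicate-literal accounting in validateVOPLiteral: a known value is de-duplicated against the previously seen literal, while an unknown expression is conservatively treated as a new literal. A reduced sketch of that counting loop (the real code additionally skips inline constants and handles forced-FP64 encodings):

// Reduced sketch, assumptions only; not the validator itself.
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

static unsigned countLiterals(const std::vector<std::optional<int64_t>> &Ops) {
  unsigned NumLiterals = 0;
  std::optional<int64_t> LiteralValue;
  for (const std::optional<int64_t> &Imm : Ops) {
    bool IsAnotherLiteral = false;
    if (!Imm.has_value()) {
      // Value not known: conservatively assume it differs from anything seen.
      IsAnotherLiteral = true;
    } else {
      IsAnotherLiteral = !LiteralValue || *LiteralValue != *Imm;
      LiteralValue = *Imm;
    }
    if (IsAnotherLiteral)
      ++NumLiterals;
  }
  return NumLiterals;
}

int main() {
  // Two identical known literals count once; an unknown expression always
  // counts as a new literal.
  assert(countLiterals({int64_t{7}, int64_t{7}}) == 1);
  assert(countLiterals({int64_t{7}, std::nullopt}) == 2);
}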