@@ -80,12 +80,8 @@ void AMDGPUInstPrinter::printFP64ImmOperand(const MCInst *MI, unsigned OpNo,
                                             const MCSubtargetInfo &STI,
                                             raw_ostream &O) {
   // KIMM64
-  // This part needs to align with AMDGPUInstPrinter::printImmediate64.
   uint64_t Imm = MI->getOperand(OpNo).getImm();
-  if (STI.hasFeature(AMDGPU::Feature64BitLiterals) && Lo_32(Imm))
-    O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
-  else
-    O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
+  printLiteral64(Imm, STI, O, /*IsFP=*/true);
 }

 void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
@@ -624,16 +620,19 @@ void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
   else if (Imm == 0x3fc45f306dc9c882 &&
            STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
     O << "0.15915494309189532";
-  else {
-    // This part needs to align with AMDGPUOperand::addLiteralImmOperand.
-    if (IsFP) {
-      if (STI.hasFeature(AMDGPU::Feature64BitLiterals) && Lo_32(Imm))
-        O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
-      else
-        O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
-      return;
-    }
+  else
+    printLiteral64(Imm, STI, O, IsFP);
+}

+void AMDGPUInstPrinter::printLiteral64(uint64_t Imm, const MCSubtargetInfo &STI,
+                                       raw_ostream &O, bool IsFP) {
+  // This part needs to align with AMDGPUOperand::addLiteralImmOperand.
+  if (IsFP) {
+    if (STI.hasFeature(AMDGPU::Feature64BitLiterals) && Lo_32(Imm))
+      O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
+    else
+      O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
+  } else {
     if (STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
         (!isInt<32>(Imm) || !isUInt<32>(Imm)))
       O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
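For readers outside the LLVM tree, the sketch below restates the lit64() selection rule that the new printLiteral64 helper centralizes. It is a standalone approximation, not LLVM code: needsLit64 and has64BitLiterals are hypothetical stand-ins for the Feature64BitLiterals query and the isInt<32>/isUInt<32>/Lo_32 checks visible in the diff, and the actual printing of the chosen form is omitted.

```cpp
// Standalone sketch (not LLVM code) of the lit64() decision consolidated in
// printLiteral64. has64BitLiterals stands in for the Feature64BitLiterals
// query; formatting of the chosen literal form is omitted.
#include <cstdint>
#include <iostream>

bool needsLit64(uint64_t Imm, bool IsFP, bool has64BitLiterals) {
  if (!has64BitLiterals)
    return false; // Without the feature, only 32-bit literal forms exist.
  if (IsFP) {
    // FP branch from the diff: the high half alone suffices only when the
    // low 32 bits are zero.
    return static_cast<uint32_t>(Imm) != 0;
  }
  // Integer branch from the diff: lit64() is chosen unless the value fits
  // both a sign-extended and a zero-extended 32-bit literal, i.e. the
  // (!isInt<32>(Imm) || !isUInt<32>(Imm)) condition.
  int64_t S = static_cast<int64_t>(Imm);
  bool FitsInt32 = S >= INT32_MIN && S <= INT32_MAX;
  bool FitsUInt32 = Imm <= UINT32_MAX;
  return !FitsInt32 || !FitsUInt32;
}

int main() {
  // 1.0 as an IEEE-754 double (0x3ff0000000000000): low half is zero, so the
  // truncated high-half form suffices.
  std::cout << needsLit64(0x3ff0000000000000ull, /*IsFP=*/true, true) << '\n'; // 0
  // A double with nonzero low bits cannot round-trip through the high half.
  std::cout << needsLit64(0x3ff0000000000001ull, /*IsFP=*/true, true) << '\n'; // 1
  // -1 fits a sign-extended 32-bit literal but not an unsigned one, so the
  // OR-condition from the diff still selects lit64().
  std::cout << needsLit64(~0ull, /*IsFP=*/false, true) << '\n';                // 1
  return 0;
}
```

The point of the refactor is visible in the removed comments: printFP64ImmOperand and printImmediate64 previously duplicated the FP-path printing and carried reminders to keep the two copies aligned; routing both through printLiteral64 leaves a single copy of that logic.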