//===----------------------------------------------------------------------===//

#include "AMDGPUMIRFormatter.h"
+#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

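+/// Parse an atomic ordering mnemonic of the form ".<ordering>" against the IR
+/// spellings produced by toIRString(). Returns true and sets \p Order on a
+/// match; otherwise sets \p Order to ~0u and returns false.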
+static bool parseAtomicOrdering(StringRef Src, unsigned &Order) {
+  Src.consume_front(".");
+  for (unsigned I = 0; I <= (unsigned)AtomicOrdering::LAST; ++I) {
+    if (Src == toIRString((AtomicOrdering)I)) {
+      Order = I;
+      return true;
+    }
+  }
+  Order = ~0u;
+  return false;
+}
+
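+/// Map an SIAtomicScope value to its MIR mnemonic spelling.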
+static const char *fmtScope(unsigned Scope) {
+  static const char *Names[] = {"none",      "singlethread", "wavefront",
+                                "workgroup", "agent",        "system"};
+  return Names[Scope];
+}
+
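+/// Parse a synchronization scope mnemonic of the form ".<scope>" (the inverse
+/// of fmtScope). Returns true and sets \p Scope on a match; otherwise sets
+/// \p Scope to ~0u and returns false.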
+static bool parseAtomicScope(StringRef Src, unsigned &Scope) {
+  Src.consume_front(".");
+  for (unsigned I = 0;
+       I != (unsigned)AMDGPU::SIAtomicScope::NUM_SI_ATOMIC_SCOPES; ++I) {
+    if (Src == fmtScope(I)) {
+      Scope = I;
+      return true;
+    }
+  }
+  Scope = ~0u;
+  return false;
+}
+
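+/// Map a single SIAtomicAddrSpace bit, given by its 1-based bit position
+/// rather than its mask, to its MIR mnemonic spelling (index 0 is "none").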
+static const char *fmtAddrSpace(unsigned Space) {
+  static const char *Names[] = {"none",    "global", "lds",
+                                "scratch", "gds",    "other"};
+  return Names[Space];
+}
+
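+/// Parse a single address-space token (no leading dot): either one of the
+/// combined aliases "none", "flat", "atomic" and "all", or an individual
+/// space named by fmtAddrSpace(). On success \p AddrSpace receives the
+/// corresponding mask.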
+static bool parseOneAddrSpace(StringRef Src, unsigned &AddrSpace) {
+  if (Src == "none") {
+    AddrSpace = (unsigned)AMDGPU::SIAtomicAddrSpace::NONE;
+    return true;
+  }
+  if (Src == "flat") {
+    AddrSpace = (unsigned)AMDGPU::SIAtomicAddrSpace::FLAT;
+    return true;
+  }
+  if (Src == "atomic") {
+    AddrSpace = (unsigned)AMDGPU::SIAtomicAddrSpace::ATOMIC;
+    return true;
+  }
+  if (Src == "all") {
+    AddrSpace = (unsigned)AMDGPU::SIAtomicAddrSpace::ALL;
+    return true;
+  }
+  for (unsigned I = 1, A = 1; A <= (unsigned)AMDGPU::SIAtomicAddrSpace::LAST;
+       A <<= 1, ++I) {
+    if (Src == fmtAddrSpace(I)) {
+      AddrSpace = A;
+      return true;
+    }
+  }
+  AddrSpace = ~0u;
+  return false;
+}
+
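+/// Parse a dot-separated list of address-space tokens, e.g. ".global.lds",
+/// OR-ing the individual masks together into \p AddrSpace.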
+static bool parseAddrSpace(StringRef Src, unsigned &AddrSpace) {
+  AddrSpace = 0;
+  Src = Src.trim();
+  Src.consume_front(".");
+  while (!Src.empty()) {
+    auto [First, Rest] = Src.split('.');
+    unsigned OneSpace;
+    if (!parseOneAddrSpace(First, OneSpace))
+      return false;
+    AddrSpace |= OneSpace;
+    Src = Rest;
+  }
+  return true;
+}
+
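+/// Print an address-space mask as a mnemonic, preferring the combined
+/// spellings (".none", ".flat", ".atomic", ".all") and otherwise emitting a
+/// dot-separated list of the individual spaces that are set.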
+static void fmtAddrSpace(raw_ostream &OS, int64_t Imm) {
+  OS << '.';
+  if (Imm == (unsigned)AMDGPU::SIAtomicAddrSpace::NONE) {
+    OS << "none";
+    return;
+  }
+  if (Imm == (unsigned)AMDGPU::SIAtomicAddrSpace::FLAT) {
+    OS << "flat";
+    return;
+  }
+  if (Imm == (unsigned)AMDGPU::SIAtomicAddrSpace::ATOMIC) {
+    OS << "atomic";
+    return;
+  }
+  if (Imm == (unsigned)AMDGPU::SIAtomicAddrSpace::ALL) {
+    OS << "all";
+    return;
+  }
+
+  ListSeparator LS{"."};
+  auto AddrSpace = (AMDGPU::SIAtomicAddrSpace)Imm;
+  const auto LAST = (unsigned)AMDGPU::SIAtomicAddrSpace::LAST;
+
+  for (unsigned A = 1, I = 1; A <= LAST; A <<= 1, ++I) {
+    if (any(AddrSpace & (AMDGPU::SIAtomicAddrSpace)A))
+      OS << LS << StringRef(fmtAddrSpace(I));
+  }
+}
+
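+/// Print one immediate operand of S_WAITCNT_FENCE_soft as a mnemonic. For
+/// example, a release fence at agent scope on the global address space would
+/// render its operands as ".release", ".agent" and ".global" (illustrative;
+/// the operand order comes from the pseudo's definition).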
+static void printFenceOperand(raw_ostream &OS, const MachineInstr &MI,
+                              std::optional<unsigned int> OpIdx, int64_t Imm) {
+#define GET_IDX(Name)                                                         \
+  AMDGPU::getNamedOperandIdx(AMDGPU::S_WAITCNT_FENCE_soft, AMDGPU::OpName::Name)
+  if (OpIdx == GET_IDX(Ordering)) {
+    assert(Imm <= (unsigned)AtomicOrdering::LAST);
+    OS << '.' << StringRef(toIRString((AtomicOrdering)Imm));
+  } else if (OpIdx == GET_IDX(Scope)) {
+    assert(Imm < (unsigned)AMDGPU::SIAtomicScope::NUM_SI_ATOMIC_SCOPES);
+    OS << '.' << StringRef(fmtScope(Imm));
+  } else if (OpIdx == GET_IDX(AddrSpace)) {
+    fmtAddrSpace(OS, Imm);
+  }
+#undef GET_IDX
+}
+
void AMDGPUMIRFormatter::printImm(raw_ostream &OS, const MachineInstr &MI,
                                  std::optional<unsigned int> OpIdx,
                                  int64_t Imm) const {

@@ -24,12 +149,46 @@ void AMDGPUMIRFormatter::printImm(raw_ostream &OS, const MachineInstr &MI,
    assert(OpIdx == 0);
    printSDelayAluImm(Imm, OS);
    break;
+  case AMDGPU::S_WAITCNT_FENCE_soft:
+    printFenceOperand(OS, MI, OpIdx, Imm);
+    break;
  default:
    MIRFormatter::printImm(OS, MI, OpIdx, Imm);
    break;
  }
}

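+/// Parse one S_WAITCNT_FENCE_soft operand mnemonic. Follows the
+/// parseImmMnemonic convention of returning true on error: returns false once
+/// \p Imm has been populated, the ErrorCallback result (conventionally true)
+/// for a malformed mnemonic, and true for an unexpected operand index.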
+static bool
+parseFenceParameter(const unsigned int OpIdx, int64_t &Imm,
+                    llvm::StringRef &Src,
+                    llvm::MIRFormatter::ErrorCallbackType &ErrorCallback) {
+#define GET_IDX(Name)                                                         \
+  AMDGPU::getNamedOperandIdx(AMDGPU::S_WAITCNT_FENCE_soft, AMDGPU::OpName::Name)
+  if (OpIdx == (unsigned)GET_IDX(Ordering)) {
+    unsigned Order = 0;
+    if (!parseAtomicOrdering(Src, Order))
+      return ErrorCallback(Src.begin(), "Expected atomic ordering");
+    Imm = Order;
+    return false;
+  }
+  if (OpIdx == (unsigned)GET_IDX(Scope)) {
+    unsigned Scope = 0;
+    if (!parseAtomicScope(Src, Scope))
+      return ErrorCallback(Src.begin(), "Expected atomic scope");
+    Imm = Scope;
+    return false;
+  }
+  if (OpIdx == (unsigned)GET_IDX(AddrSpace)) {
+    unsigned AddrSpace = 0;
+    if (!parseAddrSpace(Src, AddrSpace))
+      return ErrorCallback(Src.begin(), "Expected address space");
+    Imm = AddrSpace;
+    return false;
+  }
+  return true;
+#undef GET_IDX
+}
+
/// Implement target specific parsing of immediate mnemonics. The mnemonic is
/// a string with a leading dot.
bool AMDGPUMIRFormatter::parseImmMnemonic(const unsigned OpCode,
@@ -41,6 +200,8 @@ bool AMDGPUMIRFormatter::parseImmMnemonic(const unsigned OpCode,
  switch (OpCode) {
  case AMDGPU::S_DELAY_ALU:
    return parseSDelayAluImmMnemonic(OpIdx, Imm, Src, ErrorCallback);
+  case AMDGPU::S_WAITCNT_FENCE_soft:
+    return parseFenceParameter(OpIdx, Imm, Src, ErrorCallback);
  default:
    break;
  }