@@ -137,3 +137,133 @@ test("provider-error-classifier skips events with no error content (prevents run
137137 assert . equal ( decideCalls , 0 , "LLM decision runtime must not be invoked for empty error payloads" )
138138 assert . equal ( prompts . length , 0 )
139139} )
140+
// When the LLM decision runtime declines with skippedReason "max_concurrency_reached",
// the hook should still surface a TUI notice telling the user a classifier
// subprocess is already running.
test("provider-error-classifier injects TUI hint when LLM decision is max_concurrency_reached", async () => {
  const prompts = []
  const hook = createProviderErrorClassifierHook({
    directory: process.cwd(),
    enabled: true,
    cooldownMs: 60000,
    // Capture every injected prompt so assertions can inspect the hint text.
    client: { session: { async promptAsync(args) { prompts.push(args) } } },
    decisionRuntime: {
      config: {
        enabled: true,
        mode: "assist",
        command: "opencode",
        model: "openai/gpt-5.1-codex-mini",
        timeoutMs: 1000,
        maxPromptChars: 200,
        maxContextChars: 200,
        enableCache: false,
        cacheTtlMs: 10000,
        maxCacheEntries: 8,
      },
      // Stub decision: always reports that another classifier run is in flight.
      decide: async () => ({
        mode: "assist",
        accepted: false,
        char: "",
        raw: "",
        durationMs: 1,
        model: "openai/gpt-5.1-codex-mini",
        templateId: "provider-error-classifier-v1",
        skippedReason: "max_concurrency_reached",
      }),
    },
  })

  await hook.event("session.error", {
    properties: { sessionID: "s7", error: "Unusual provider error with unknown wording" },
  })

  assert.equal(prompts.length, 1)
  const hintText = String(prompts[0].body.parts[0].text)
  // NOTE(review): regex spacing reconstructed from a garbled diff — confirm the
  // exact tag format against the hint text the hook actually injects.
  assert.match(hintText, /\[provider ERROR CLASSIFIER\]/i)
  assert.match(hintText, /subprocess already in progress/i)
})
182+
// When the LLM decision runtime declines with skippedReason "runtime_cooldown",
// the hook should inject a TUI notice mentioning the cooldown.
test("provider-error-classifier injects TUI hint when LLM decision is runtime_cooldown", async () => {
  const prompts = []
  const hook = createProviderErrorClassifierHook({
    directory: process.cwd(),
    enabled: true,
    cooldownMs: 60000,
    // Record injected prompts for inspection below.
    client: { session: { async promptAsync(args) { prompts.push(args) } } },
    decisionRuntime: {
      config: {
        enabled: true,
        mode: "assist",
        command: "opencode",
        model: "openai/gpt-5.1-codex-mini",
        timeoutMs: 1000,
        maxPromptChars: 200,
        maxContextChars: 200,
        enableCache: false,
        cacheTtlMs: 10000,
        maxCacheEntries: 8,
      },
      // Stub decision: always reports the runtime is cooling down.
      decide: async () => ({
        mode: "assist",
        accepted: false,
        char: "",
        raw: "",
        durationMs: 1,
        model: "openai/gpt-5.1-codex-mini",
        templateId: "provider-error-classifier-v1",
        skippedReason: "runtime_cooldown",
      }),
    },
  })

  await hook.event("session.error", {
    properties: { sessionID: "s8", error: "Unusual provider error with unknown wording" },
  })

  assert.equal(prompts.length, 1)
  const hintText = String(prompts[0].body.parts[0].text)
  // NOTE(review): regex spacing reconstructed from a garbled diff — confirm the
  // exact tag format against the hint text the hook actually injects.
  assert.match(hintText, /\[provider ERROR CLASSIFIER\]/i)
  assert.match(hintText, /cooldown/i)
})
224+
// Repeated skip decisions inside one cooldown window must not spam the TUI:
// decide() runs per event, but only the first skip notice is injected.
test("provider-error-classifier deduplicates runtime skip notices within cooldown window", async () => {
  const prompts = []
  let callCount = 0
  const hook = createProviderErrorClassifierHook({
    directory: process.cwd(),
    enabled: true,
    cooldownMs: 60000,
    client: { session: { async promptAsync(args) { prompts.push(args) } } },
    decisionRuntime: {
      config: {
        enabled: true,
        mode: "assist",
        command: "opencode",
        model: "openai/gpt-5.1-codex-mini",
        timeoutMs: 1000,
        maxPromptChars: 200,
        maxContextChars: 200,
        enableCache: false,
        cacheTtlMs: 10000,
        maxCacheEntries: 8,
      },
      // Stub decision: counts invocations and always reports a concurrency skip.
      decide: async () => {
        callCount++
        return {
          mode: "assist",
          accepted: false,
          char: "",
          raw: "",
          durationMs: 1,
          model: "openai/gpt-5.1-codex-mini",
          templateId: "provider-error-classifier-v1",
          skippedReason: "max_concurrency_reached",
        }
      },
    },
  })

  // Fire three events — all get skip notices from decide(), but only the first should inject
  for (const message of ["provider error alpha", "provider error beta", "provider error gamma"]) {
    await hook.event("session.error", { properties: { sessionID: "s9", error: message } })
  }

  assert.equal(callCount, 3, "decide() should be called for each event")
  assert.equal(prompts.length, 1, "Only one skip notice should be injected within the cooldown window")
})
0 commit comments