Skip to content

Commit 58bb1ab

Browse files
dmoliveira and Copilot
committed
Merge feat/classifier-tui-feedback: TUI visual feedback for classifier skip events
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
2 parents 86a9630 + 887a1c6 commit 58bb1ab

File tree

3 files changed

+214
-0
lines changed

3 files changed

+214
-0
lines changed

plugin/gateway-core/dist/hooks/provider-error-classifier/index.js

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,8 +71,27 @@ function buildHint(classification, reason) {
7171
"- Continue with minimal prompt scope until provider stabilizes",
7272
].join("\n");
7373
}
74+
function buildConcurrencySkipHint() {
75+
return [
76+
"[provider ERROR CLASSIFIER]",
77+
"LLM classification skipped — subprocess already in progress.",
78+
"- A decision subprocess is already running; this event was dropped.",
79+
"- The active classification will complete shortly.",
80+
"- Pattern-based error detection continues normally.",
81+
].join("\n");
82+
}
83+
function buildCooldownSkipHint() {
84+
return [
85+
"[provider ERROR CLASSIFIER]",
86+
"LLM classification paused — runtime cooldown active.",
87+
"- The decision runtime is cooling down after a recent subprocess failure.",
88+
"- Provider errors will not be LLM-classified until the cooldown expires.",
89+
"- Pattern-based classification continues normally.",
90+
].join("\n");
91+
}
7492
export function createProviderErrorClassifierHook(options) {
7593
const lastClassificationBySession = new Map();
94+
const lastRuntimeSkipNoticeBySession = new Map();
7695
return {
7796
id: "provider-error-classifier",
7897
priority: 361,
@@ -84,6 +103,7 @@ export function createProviderErrorClassifierHook(options) {
84103
const sessionId = resolveSessionId((payload ?? {}));
85104
if (sessionId) {
86105
lastClassificationBySession.delete(sessionId);
106+
lastRuntimeSkipNoticeBySession.delete(sessionId);
87107
}
88108
return;
89109
}
@@ -117,6 +137,27 @@ export function createProviderErrorClassifierHook(options) {
117137
},
118138
cacheKey: `provider-error:${text.trim().toLowerCase()}`,
119139
});
140+
if (decision.skippedReason === "max_concurrency_reached" || decision.skippedReason === "runtime_cooldown") {
141+
if (session && sessionId) {
142+
const now = Date.now();
143+
const cooldownMs = Math.max(1, Math.floor(options.cooldownMs));
144+
const lastNoticeAt = lastRuntimeSkipNoticeBySession.get(sessionId) ?? 0;
145+
if (now - lastNoticeAt >= cooldownMs) {
146+
const hint = decision.skippedReason === "max_concurrency_reached"
147+
? buildConcurrencySkipHint()
148+
: buildCooldownSkipHint();
149+
const directory = resolveDirectory(eventPayload, options.directory);
150+
await injectHookMessage({ session, sessionId, content: hint, directory });
151+
writeGatewayEventAudit(directory, {
152+
hook: "provider-error-classifier",
153+
stage: "state",
154+
reason_code: `llm_decision_${decision.skippedReason}`,
155+
session_id: sessionId,
156+
});
157+
lastRuntimeSkipNoticeBySession.set(sessionId, now);
158+
}
159+
}
160+
}
120161
if (decision.accepted) {
121162
const classification = CLASSIFICATION_BY_CHAR[decision.char];
122163
if (classification) {

plugin/gateway-core/src/hooks/provider-error-classifier/index.ts

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,26 @@ function buildHint(classification: Classification, reason: string): string {
108108
].join("\n")
109109
}
110110

111+
function buildConcurrencySkipHint(): string {
112+
return [
113+
"[provider ERROR CLASSIFIER]",
114+
"LLM classification skipped — subprocess already in progress.",
115+
"- A decision subprocess is already running; this event was dropped.",
116+
"- The active classification will complete shortly.",
117+
"- Pattern-based error detection continues normally.",
118+
].join("\n")
119+
}
120+
121+
function buildCooldownSkipHint(): string {
122+
return [
123+
"[provider ERROR CLASSIFIER]",
124+
"LLM classification paused — runtime cooldown active.",
125+
"- The decision runtime is cooling down after a recent subprocess failure.",
126+
"- Provider errors will not be LLM-classified until the cooldown expires.",
127+
"- Pattern-based classification continues normally.",
128+
].join("\n")
129+
}
130+
111131
export function createProviderErrorClassifierHook(options: {
112132
directory: string
113133
enabled: boolean
@@ -116,6 +136,7 @@ export function createProviderErrorClassifierHook(options: {
116136
decisionRuntime?: LlmDecisionRuntime
117137
}): GatewayHook {
118138
const lastClassificationBySession = new Map<string, { classification: Classification; at: number }>()
139+
const lastRuntimeSkipNoticeBySession = new Map<string, number>()
119140
return {
120141
id: "provider-error-classifier",
121142
priority: 361,
@@ -127,6 +148,7 @@ export function createProviderErrorClassifierHook(options: {
127148
const sessionId = resolveSessionId((payload ?? {}) as EventPayload)
128149
if (sessionId) {
129150
lastClassificationBySession.delete(sessionId)
151+
lastRuntimeSkipNoticeBySession.delete(sessionId)
130152
}
131153
return
132154
}
@@ -160,6 +182,27 @@ export function createProviderErrorClassifierHook(options: {
160182
},
161183
cacheKey: `provider-error:${text.trim().toLowerCase()}`,
162184
})
185+
if (decision.skippedReason === "max_concurrency_reached" || decision.skippedReason === "runtime_cooldown") {
186+
if (session && sessionId) {
187+
const now = Date.now()
188+
const cooldownMs = Math.max(1, Math.floor(options.cooldownMs))
189+
const lastNoticeAt = lastRuntimeSkipNoticeBySession.get(sessionId) ?? 0
190+
if (now - lastNoticeAt >= cooldownMs) {
191+
const hint = decision.skippedReason === "max_concurrency_reached"
192+
? buildConcurrencySkipHint()
193+
: buildCooldownSkipHint()
194+
const directory = resolveDirectory(eventPayload, options.directory)
195+
await injectHookMessage({ session, sessionId, content: hint, directory })
196+
writeGatewayEventAudit(directory, {
197+
hook: "provider-error-classifier",
198+
stage: "state",
199+
reason_code: `llm_decision_${decision.skippedReason}`,
200+
session_id: sessionId,
201+
})
202+
lastRuntimeSkipNoticeBySession.set(sessionId, now)
203+
}
204+
}
205+
}
163206
if (decision.accepted) {
164207
const classification = CLASSIFICATION_BY_CHAR[decision.char]
165208
if (classification) {

plugin/gateway-core/test/provider-error-classifier-hook.test.mjs

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -137,3 +137,133 @@ test("provider-error-classifier skips events with no error content (prevents run
137137
assert.equal(decideCalls, 0, "LLM decision runtime must not be invoked for empty error payloads")
138138
assert.equal(prompts.length, 0)
139139
})
140+
141+
test("provider-error-classifier injects TUI hint when LLM decision is max_concurrency_reached", async () => {
142+
const prompts = []
143+
const hook = createProviderErrorClassifierHook({
144+
directory: process.cwd(),
145+
enabled: true,
146+
cooldownMs: 60000,
147+
client: { session: { async promptAsync(args) { prompts.push(args) } } },
148+
decisionRuntime: {
149+
config: {
150+
enabled: true,
151+
mode: "assist",
152+
command: "opencode",
153+
model: "openai/gpt-5.1-codex-mini",
154+
timeoutMs: 1000,
155+
maxPromptChars: 200,
156+
maxContextChars: 200,
157+
enableCache: false,
158+
cacheTtlMs: 10000,
159+
maxCacheEntries: 8,
160+
},
161+
decide: async () => ({
162+
mode: "assist",
163+
accepted: false,
164+
char: "",
165+
raw: "",
166+
durationMs: 1,
167+
model: "openai/gpt-5.1-codex-mini",
168+
templateId: "provider-error-classifier-v1",
169+
skippedReason: "max_concurrency_reached",
170+
}),
171+
},
172+
})
173+
174+
await hook.event("session.error", {
175+
properties: { sessionID: "s7", error: "Unusual provider error with unknown wording" },
176+
})
177+
178+
assert.equal(prompts.length, 1)
179+
assert.match(String(prompts[0].body.parts[0].text), /\[provider ERROR CLASSIFIER\]/i)
180+
assert.match(String(prompts[0].body.parts[0].text), /subprocess already in progress/i)
181+
})
182+
183+
test("provider-error-classifier injects TUI hint when LLM decision is runtime_cooldown", async () => {
184+
const prompts = []
185+
const hook = createProviderErrorClassifierHook({
186+
directory: process.cwd(),
187+
enabled: true,
188+
cooldownMs: 60000,
189+
client: { session: { async promptAsync(args) { prompts.push(args) } } },
190+
decisionRuntime: {
191+
config: {
192+
enabled: true,
193+
mode: "assist",
194+
command: "opencode",
195+
model: "openai/gpt-5.1-codex-mini",
196+
timeoutMs: 1000,
197+
maxPromptChars: 200,
198+
maxContextChars: 200,
199+
enableCache: false,
200+
cacheTtlMs: 10000,
201+
maxCacheEntries: 8,
202+
},
203+
decide: async () => ({
204+
mode: "assist",
205+
accepted: false,
206+
char: "",
207+
raw: "",
208+
durationMs: 1,
209+
model: "openai/gpt-5.1-codex-mini",
210+
templateId: "provider-error-classifier-v1",
211+
skippedReason: "runtime_cooldown",
212+
}),
213+
},
214+
})
215+
216+
await hook.event("session.error", {
217+
properties: { sessionID: "s8", error: "Unusual provider error with unknown wording" },
218+
})
219+
220+
assert.equal(prompts.length, 1)
221+
assert.match(String(prompts[0].body.parts[0].text), /\[provider ERROR CLASSIFIER\]/i)
222+
assert.match(String(prompts[0].body.parts[0].text), /cooldown/i)
223+
})
224+
225+
test("provider-error-classifier deduplicates runtime skip notices within cooldown window", async () => {
226+
const prompts = []
227+
let callCount = 0
228+
const hook = createProviderErrorClassifierHook({
229+
directory: process.cwd(),
230+
enabled: true,
231+
cooldownMs: 60000,
232+
client: { session: { async promptAsync(args) { prompts.push(args) } } },
233+
decisionRuntime: {
234+
config: {
235+
enabled: true,
236+
mode: "assist",
237+
command: "opencode",
238+
model: "openai/gpt-5.1-codex-mini",
239+
timeoutMs: 1000,
240+
maxPromptChars: 200,
241+
maxContextChars: 200,
242+
enableCache: false,
243+
cacheTtlMs: 10000,
244+
maxCacheEntries: 8,
245+
},
246+
decide: async () => {
247+
callCount++
248+
return {
249+
mode: "assist",
250+
accepted: false,
251+
char: "",
252+
raw: "",
253+
durationMs: 1,
254+
model: "openai/gpt-5.1-codex-mini",
255+
templateId: "provider-error-classifier-v1",
256+
skippedReason: "max_concurrency_reached",
257+
}
258+
},
259+
},
260+
})
261+
262+
// Fire three events — all get skip notices from decide(), but only the first should inject
263+
await hook.event("session.error", { properties: { sessionID: "s9", error: "provider error alpha" } })
264+
await hook.event("session.error", { properties: { sessionID: "s9", error: "provider error beta" } })
265+
await hook.event("session.error", { properties: { sessionID: "s9", error: "provider error gamma" } })
266+
267+
assert.equal(callCount, 3, "decide() should be called for each event")
268+
assert.equal(prompts.length, 1, "Only one skip notice should be injected within the cooldown window")
269+
})

0 commit comments

Comments (0)