
Commit 21e2262

Merge pull request #11 from Tarquinen/release/v0.3.10
Release v0.3.10
2 parents 0024e53 + aeb57c9 commit 21e2262

File tree

9 files changed: +189 -681 lines changed


index.ts

Lines changed: 41 additions & 323 deletions
Large diffs are not rendered by default.

lib/config.ts

Lines changed: 0 additions & 2 deletions
@@ -135,8 +135,6 @@ function loadConfigFile(configPath: string): Partial<PluginConfig> | null {
     const fileContent = readFileSync(configPath, 'utf-8')
     return parse(fileContent) as Partial<PluginConfig>
   } catch (error: any) {
-    const logger = new Logger(true)
-    logger.error('config', `Failed to read config from ${configPath}: ${error.message}`)
     return null
   }
 }

lib/janitor.ts

Lines changed: 82 additions & 230 deletions
Large diffs are not rendered by default.

lib/logger.ts

Lines changed: 35 additions & 19 deletions
@@ -23,29 +23,55 @@ export class Logger {
     }
   }
 
+  /**
+   * Formats data object into a compact, readable string
+   * e.g., {saved: "~4.1K", pruned: 4, duplicates: 0} -> "saved=~4.1K pruned=4 duplicates=0"
+   */
+  private formatData(data?: any): string {
+    if (!data) return ""
+
+    const parts: string[] = []
+    for (const [key, value] of Object.entries(data)) {
+      if (value === undefined || value === null) continue
+
+      // Format arrays compactly
+      if (Array.isArray(value)) {
+        if (value.length === 0) continue
+        parts.push(`${key}=[${value.slice(0, 3).join(",")}${value.length > 3 ? `...+${value.length - 3}` : ""}]`)
+      }
+      // Format objects inline if small, skip if large
+      else if (typeof value === 'object') {
+        const str = JSON.stringify(value)
+        if (str.length < 50) {
+          parts.push(`${key}=${str}`)
+        }
+      }
+      // Format primitives directly
+      else {
+        parts.push(`${key}=${value}`)
+      }
+    }
+    return parts.join(" ")
+  }
+
   private async write(level: string, component: string, message: string, data?: any) {
     if (!this.enabled) return
 
     try {
       await this.ensureLogDir()
 
       const timestamp = new Date().toISOString()
-      const logEntry = {
-        timestamp,
-        level,
-        component,
-        message,
-        ...(data && { data })
-      }
+      const dataStr = this.formatData(data)
+
+      // Simple, readable format: TIMESTAMP LEVEL component: message | key=value key=value
+      const logLine = `${timestamp} ${level.padEnd(5)} ${component}: ${message}${dataStr ? " | " + dataStr : ""}\n`
 
       const dailyLogDir = join(this.logDir, "daily")
       if (!existsSync(dailyLogDir)) {
         await mkdir(dailyLogDir, { recursive: true })
       }
 
       const logFile = join(dailyLogDir, `${new Date().toISOString().split('T')[0]}.log`)
-      const logLine = JSON.stringify(logEntry) + "\n"
-
       await writeFile(logFile, logLine, { flag: "a" })
     } catch (error) {
       // Silently fail - don't break the plugin if logging fails
@@ -140,7 +166,6 @@ export class Logger {
     // We detect being "inside a string" by tracking quotes
     let result = ''
     let inString = false
-    let escaped = false
 
     for (let i = 0; i < jsonText.length; i++) {
      const char = jsonText[i]
@@ -237,15 +262,6 @@ export class Logger {
       const jsonString = JSON.stringify(content, null, 2)
 
       await writeFile(filepath, jsonString)
-
-      // Log that we saved it
-      await this.debug("logger", "Saved AI context", {
-        sessionID,
-        filepath,
-        messageCount: messages.length,
-        isJanitorShadow,
-        parsed: isJanitorShadow
-      })
     } catch (error) {
       // Silently fail - don't break the plugin if logging fails
     }
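
The new formatter trades the old JSON-per-line entries for one scannable line per event. A minimal usage sketch, assuming the new Logger(true) constructor and the info(component, message, data) signature visible elsewhere in this diff; the timestamp and output shown are illustrative:

const logger = new Logger(true)

// Old entry: {"timestamp":"...","level":"info","component":"janitor","message":"Prune complete","data":{...}}
// New line:  2025-01-01T00:00:00.000Z info  janitor: Prune complete | saved=~4.1K pruned=4 duplicates=0
await logger.info("janitor", "Prune complete", { saved: "~4.1K", pruned: 4, duplicates: 0 })

Per formatData, arrays print at most three items (e.g. ids=[a,b,c...+2]), null/undefined values and empty arrays are skipped, and nested objects are inlined only when their JSON form is under 50 characters.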

lib/model-selector.ts

Lines changed: 4 additions & 78 deletions
@@ -120,8 +120,6 @@ export async function selectModel(
   configModel?: string,
   workspaceDir?: string
 ): Promise<ModelSelectionResult> {
-  logger?.info('model-selector', 'Model selection started', { currentModel, configModel, workspaceDir });
-
   // Lazy import with retry logic - handles plugin initialization timing issues
   // Some providers (like openai via @openhax/codex) may not be ready on first attempt
   // Pass workspaceDir so OpencodeAI can find project-level config and plugins
@@ -133,32 +131,20 @@ export async function selectModel(
   if (configModel) {
     const parts = configModel.split('/');
     if (parts.length !== 2) {
-      logger?.warn('model-selector', '✗ Invalid config model format, expected "provider/model"', {
-        configModel
-      });
+      logger?.warn('model-selector', 'Invalid config model format', { configModel });
     } else {
       const [providerID, modelID] = parts;
-      logger?.debug('model-selector', 'Attempting to use config-specified model', {
-        providerID,
-        modelID
-      });
 
       try {
         const model = await opencodeAI.getLanguageModel(providerID, modelID);
-        logger?.info('model-selector', '✓ Successfully using config-specified model', {
-          providerID,
-          modelID
-        });
         return {
           model,
           modelInfo: { providerID, modelID },
           source: 'config',
           reason: 'Using model specified in dcp.jsonc config'
         };
       } catch (error: any) {
-        logger?.warn('model-selector', '✗ Failed to use config-specified model, falling back', {
-          providerID,
-          modelID,
+        logger?.warn('model-selector', `Config model failed: ${providerID}/${modelID}`, {
           error: error.message
         });
         failedModelInfo = { providerID, modelID };
@@ -169,39 +155,20 @@ export async function selectModel(
   // Step 2: Try user's current model (if not skipped provider)
   if (currentModel) {
     if (shouldSkipProvider(currentModel.providerID)) {
-      logger?.info('model-selector', 'Skipping user model (provider not suitable for background tasks)', {
-        providerID: currentModel.providerID,
-        modelID: currentModel.modelID,
-        reason: 'github-copilot and anthropic are skipped for analysis'
-      });
       // Track as failed so we can show toast
       if (!failedModelInfo) {
         failedModelInfo = currentModel;
       }
     } else {
-      logger?.debug('model-selector', 'Attempting to use user\'s current model', {
-        providerID: currentModel.providerID,
-        modelID: currentModel.modelID
-      });
-
       try {
         const model = await opencodeAI.getLanguageModel(currentModel.providerID, currentModel.modelID);
-        logger?.info('model-selector', '✓ Successfully using user\'s current model', {
-          providerID: currentModel.providerID,
-          modelID: currentModel.modelID
-        });
         return {
           model,
           modelInfo: currentModel,
           source: 'user-model',
           reason: 'Using current session model'
         };
       } catch (error: any) {
-        logger?.warn('model-selector', '✗ Failed to use user\'s current model', {
-          providerID: currentModel.providerID,
-          modelID: currentModel.modelID,
-          error: error.message
-        });
         if (!failedModelInfo) {
           failedModelInfo = currentModel;
         }
@@ -210,43 +177,16 @@ export async function selectModel(
   }
 
   // Step 3: Try fallback models from authenticated providers
-  logger?.debug('model-selector', 'Fetching available authenticated providers');
   const providers = await opencodeAI.listProviders();
-  const availableProviderIDs = Object.keys(providers);
-  logger?.info('model-selector', 'Available authenticated providers', {
-    providerCount: availableProviderIDs.length,
-    providerIDs: availableProviderIDs,
-    providers: Object.entries(providers).map(([id, info]: [string, any]) => ({
-      id,
-      source: info.source,
-      name: info.info?.name
-    }))
-  });
-
-  logger?.debug('model-selector', 'Attempting fallback models from providers', {
-    priorityOrder: PROVIDER_PRIORITY
-  });
 
   for (const providerID of PROVIDER_PRIORITY) {
-    if (!providers[providerID]) {
-      logger?.debug('model-selector', `Skipping ${providerID} (not authenticated)`);
-      continue;
-    }
+    if (!providers[providerID]) continue;
 
     const fallbackModelID = FALLBACK_MODELS[providerID];
-    if (!fallbackModelID) {
-      logger?.debug('model-selector', `Skipping ${providerID} (no fallback model configured)`);
-      continue;
-    }
-
-    logger?.debug('model-selector', `Attempting ${providerID}/${fallbackModelID}`);
+    if (!fallbackModelID) continue;
 
     try {
       const model = await opencodeAI.getLanguageModel(providerID, fallbackModelID);
-      logger?.info('model-selector', `✓ Successfully using fallback model`, {
-        providerID,
-        modelID: fallbackModelID
-      });
       return {
         model,
         modelInfo: { providerID, modelID: fallbackModelID },
@@ -255,9 +195,6 @@ export async function selectModel(
         failedModel: failedModelInfo
       };
     } catch (error: any) {
-      logger?.warn('model-selector', `✗ Failed to use ${providerID}/${fallbackModelID}`, {
-        error: error.message
-      });
       continue;
     }
   }
@@ -270,14 +207,8 @@ export async function selectModel(
  * This can be used by the plugin to get the current session's model
  */
 export function extractModelFromSession(sessionState: any, logger?: Logger): ModelInfo | undefined {
-  logger?.debug('model-selector', 'Extracting model from session state');
-
   // Try to get from ACP session state
   if (sessionState?.model?.providerID && sessionState?.model?.modelID) {
-    logger?.info('model-selector', 'Found model in ACP session state', {
-      providerID: sessionState.model.providerID,
-      modelID: sessionState.model.modelID
-    });
     return {
       providerID: sessionState.model.providerID,
       modelID: sessionState.model.modelID
@@ -288,17 +219,12 @@ export function extractModelFromSession(sessionState: any, logger?: Logger): Mod
   if (sessionState?.messages && Array.isArray(sessionState.messages)) {
     const lastMessage = sessionState.messages[sessionState.messages.length - 1];
     if (lastMessage?.model?.providerID && lastMessage?.model?.modelID) {
-      logger?.info('model-selector', 'Found model in last message', {
-        providerID: lastMessage.model.providerID,
-        modelID: lastMessage.model.modelID
-      });
       return {
         providerID: lastMessage.model.providerID,
        modelID: lastMessage.model.modelID
       };
     }
   }
 
-  logger?.warn('model-selector', 'Could not extract model from session state');
   return undefined;
 }
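
With the step-by-step logging gone, the Step 3 loop reduces to guard clauses. A standalone sketch of the resulting control flow; the provider and model names below are hypothetical stand-ins, not the plugin's actual PROVIDER_PRIORITY and FALLBACK_MODELS tables:

// Hypothetical stand-ins for the plugin's real priority tables.
const PROVIDER_PRIORITY = ["openai", "google"]
const FALLBACK_MODELS: Record<string, string> = {
  openai: "gpt-4.1-mini",
  google: "gemini-2.0-flash",
}

async function pickFallback(
  providers: Record<string, unknown>,
  getModel: (providerID: string, modelID: string) => Promise<unknown>,
) {
  for (const providerID of PROVIDER_PRIORITY) {
    if (!providers[providerID]) continue   // not authenticated
    const modelID = FALLBACK_MODELS[providerID]
    if (!modelID) continue                 // no fallback configured
    try {
      return { model: await getModel(providerID, modelID), providerID, modelID }
    } catch {
      continue                             // try the next provider in priority order
    }
  }
  return undefined
}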

lib/state.ts

Lines changed: 20 additions & 0 deletions
@@ -1,7 +1,13 @@
 // lib/state.ts
 
+export interface SessionStats {
+  totalToolsPruned: number
+  totalTokensSaved: number
+}
+
 export class StateManager {
   private state: Map<string, string[]> = new Map()
+  private stats: Map<string, SessionStats> = new Map()
 
   async get(sessionID: string): Promise<string[]> {
     return this.state.get(sessionID) ?? []
@@ -10,4 +16,18 @@ export class StateManager {
   async set(sessionID: string, prunedIds: string[]): Promise<void> {
     this.state.set(sessionID, prunedIds)
   }
+
+  async getStats(sessionID: string): Promise<SessionStats> {
+    return this.stats.get(sessionID) ?? { totalToolsPruned: 0, totalTokensSaved: 0 }
+  }
+
+  async addStats(sessionID: string, toolsPruned: number, tokensSaved: number): Promise<SessionStats> {
+    const current = await this.getStats(sessionID)
+    const updated: SessionStats = {
+      totalToolsPruned: current.totalToolsPruned + toolsPruned,
+      totalTokensSaved: current.totalTokensSaved + tokensSaved
+    }
+    this.stats.set(sessionID, updated)
+    return updated
+  }
 }
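
The stats map keeps running per-session totals in memory. A quick usage sketch (session IDs are illustrative):

const state = new StateManager()

await state.addStats("ses_abc", 4, 4100)               // 4 tools pruned, ~4.1K tokens saved
const totals = await state.addStats("ses_abc", 2, 900)
// totals: { totalToolsPruned: 6, totalTokensSaved: 5000 }

await state.getStats("ses_unknown")
// unseen sessions return the zeroed default: { totalToolsPruned: 0, totalTokensSaved: 0 }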

lib/tokenizer.ts

Lines changed: 4 additions & 26 deletions
@@ -10,41 +10,19 @@
  * is actually needed.
  */
 
-import type { Logger } from './logger'
-
 /**
  * Batch estimates tokens for multiple text samples
  *
  * @param texts - Array of text strings to tokenize
- * @param logger - Optional logger instance
  * @returns Array of token counts
  */
-export async function estimateTokensBatch(
-  texts: string[],
-  logger?: Logger
-): Promise<number[]> {
+export async function estimateTokensBatch(texts: string[]): Promise<number[]> {
   try {
     // Lazy import - only load the 53MB gpt-tokenizer package when actually needed
     const { encode } = await import('gpt-tokenizer')
-
-    const results = texts.map(text => {
-      const tokens = encode(text)
-      return tokens.length
-    })
-
-    logger?.debug('tokenizer', 'Batch token estimation complete', {
-      batchSize: texts.length,
-      totalTokens: results.reduce((sum, count) => sum + count, 0),
-      avgTokensPerText: Math.round(results.reduce((sum, count) => sum + count, 0) / results.length)
-    })
-
-    return results
-  } catch (error: any) {
-    logger?.warn('tokenizer', 'Batch tokenization failed, using fallback', {
-      error: error.message
-    })
-
-    // Fallback to character-based estimation
+    return texts.map(text => encode(text).length)
+  } catch {
+    // Fallback to character-based estimation if tokenizer fails
     return texts.map(text => Math.round(text.length / 4))
  }
 }
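
The slimmed-down function keeps its contract: one token count per input string, with a length-divided-by-four estimate if the lazy gpt-tokenizer import fails. A usage sketch; the import path is assumed from the file's location:

import { estimateTokensBatch } from "./lib/tokenizer"

const counts = await estimateTokensBatch([
  "Hello, world!",
  "A longer string whose token count we want to estimate.",
])
console.log(counts)   // e.g. [4, 11] via gpt-tokenizer; Math.round(length / 4) per text on fallback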

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 {
   "$schema": "https://json.schemastore.org/package.json",
   "name": "@tarquinen/opencode-dcp",
-  "version": "0.3.9",
+  "version": "0.3.10",
   "type": "module",
   "description": "OpenCode plugin that optimizes token usage by pruning obsolete tool outputs from conversation context",
   "main": "./dist/index.js",

0 commit comments