Skip to content

Commit 75c29d4

Browse files
committed
summary optimizations
1 parent e103fb1 commit 75c29d4

File tree

9 files changed

+113
-70
lines changed

9 files changed

+113
-70
lines changed

packages/opencode/src/provider/provider.ts

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -658,20 +658,29 @@ export namespace Provider {
658658
}
659659

660660
const provider = await state().then((state) => state.providers[providerID])
661-
if (!provider) return
662-
let priority = ["claude-haiku-4-5", "claude-haiku-4.5", "3-5-haiku", "3.5-haiku", "gemini-2.5-flash", "gpt-5-nano"]
663-
// claude-haiku-4.5 is considered a premium model in github copilot, we shouldn't use premium requests for title gen
664-
if (providerID === "github-copilot") {
665-
priority = priority.filter((m) => m !== "claude-haiku-4.5")
666-
}
667-
if (providerID === "opencode" || providerID === "local") {
668-
priority = ["gpt-5-nano"]
669-
}
670-
for (const item of priority) {
671-
for (const model of Object.keys(provider.info.models)) {
672-
if (model.includes(item)) return getModel(providerID, model)
661+
if (provider) {
662+
let priority = [
663+
"claude-haiku-4-5",
664+
"claude-haiku-4.5",
665+
"3-5-haiku",
666+
"3.5-haiku",
667+
"gemini-2.5-flash",
668+
"gpt-5-nano",
669+
]
670+
// claude-haiku-4.5 is considered a premium model in github copilot, we shouldn't use premium requests for title gen
671+
if (providerID === "github-copilot") {
672+
priority = priority.filter((m) => m !== "claude-haiku-4.5")
673+
}
674+
if (providerID === "opencode" || providerID === "local") {
675+
priority = ["gpt-5-nano"]
676+
}
677+
for (const item of priority) {
678+
for (const model of Object.keys(provider.info.models)) {
679+
if (model.includes(item)) return getModel(providerID, model)
680+
}
673681
}
674682
}
683+
return getModel("opencode", "gpt-5-nano")
675684
}
676685

677686
const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"]

packages/opencode/src/provider/transform.ts

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -128,12 +128,7 @@ export namespace ProviderTransform {
128128
return undefined
129129
}
130130

131-
export function options(
132-
providerID: string,
133-
modelID: string,
134-
npm: string,
135-
sessionID: string,
136-
): Record<string, any> | undefined {
131+
export function options(providerID: string, modelID: string, npm: string, sessionID: string): Record<string, any> {
137132
const result: Record<string, any> = {}
138133

139134
// switch to providerID later, for now use this
@@ -175,6 +170,25 @@ export namespace ProviderTransform {
175170
return result
176171
}
177172

173+
export function smallOptions(input: { providerID: string; modelID: string }) {
174+
const options: Record<string, any> = {}
175+
176+
if (input.providerID === "openai" || input.modelID.includes("gpt-5")) {
177+
if (input.modelID.includes("5.1")) {
178+
options["reasoningEffort"] = "low"
179+
} else {
180+
options["reasoningEffort"] = "minimal"
181+
}
182+
}
183+
if (input.providerID === "google") {
184+
options["thinkingConfig"] = {
185+
thinkingBudget: 0,
186+
}
187+
}
188+
189+
return options
190+
}
191+
178192
export function providerOptions(npm: string | undefined, providerID: string, options: { [x: string]: any }) {
179193
switch (npm) {
180194
case "@ai-sdk/openai":

packages/opencode/src/session/compaction.ts

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import { Log } from "../util/log"
1515
import { ProviderTransform } from "@/provider/transform"
1616
import { SessionProcessor } from "./processor"
1717
import { fn } from "@/util/fn"
18+
import { mergeDeep, pipe } from "remeda"
1819

1920
export namespace SessionCompaction {
2021
const log = Log.create({ service: "session.compaction" })
@@ -96,7 +97,7 @@ export namespace SessionCompaction {
9697
abort: AbortSignal
9798
}) {
9899
const model = await Provider.getModel(input.model.providerID, input.model.modelID)
99-
const system = [...SystemPrompt.summarize(model.providerID)]
100+
const system = [...SystemPrompt.compaction(model.providerID)]
100101
const msg = (await Session.updateMessage({
101102
id: Identifier.ascending("message"),
102103
role: "assistant",
@@ -137,10 +138,15 @@ export namespace SessionCompaction {
137138
},
138139
// set to 0, we handle loop
139140
maxRetries: 0,
140-
providerOptions: ProviderTransform.providerOptions(model.npm, model.providerID, {
141-
...ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", input.sessionID),
142-
...model.info.options,
143-
}),
141+
providerOptions: ProviderTransform.providerOptions(
142+
model.npm,
143+
model.providerID,
144+
pipe(
145+
{},
146+
mergeDeep(ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", input.sessionID)),
147+
mergeDeep(model.info.options),
148+
),
149+
),
144150
headers: model.info.headers,
145151
abortSignal: input.abort,
146152
tools: model.info.tool_call ? {} : undefined,

packages/opencode/src/session/prompt.ts

Lines changed: 13 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -489,11 +489,12 @@ export namespace SessionPrompt {
489489
? (agent.temperature ?? ProviderTransform.temperature(model.providerID, model.modelID))
490490
: undefined,
491491
topP: agent.topP ?? ProviderTransform.topP(model.providerID, model.modelID),
492-
options: {
493-
...ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", sessionID),
494-
...model.info.options,
495-
...agent.options,
496-
},
492+
options: pipe(
493+
{},
494+
mergeDeep(ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", sessionID)),
495+
mergeDeep(model.info.options),
496+
mergeDeep(agent.options),
497+
),
497498
},
498499
)
499500

@@ -1384,7 +1385,6 @@ export namespace SessionPrompt {
13841385
return result
13851386
}
13861387

1387-
// TODO: wire this back up
13881388
async function ensureTitle(input: {
13891389
session: Session.Info
13901390
message: MessageV2.WithParts
@@ -1398,24 +1398,13 @@ export namespace SessionPrompt {
13981398
input.history.filter((m) => m.info.role === "user" && !m.parts.every((p) => "synthetic" in p && p.synthetic))
13991399
.length === 1
14001400
if (!isFirst) return
1401-
const small =
1402-
(await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
1403-
const options = {
1404-
...ProviderTransform.options(small.providerID, small.modelID, small.npm ?? "", input.session.id),
1405-
...small.info.options,
1406-
}
1407-
if (small.providerID === "openai" || small.modelID.includes("gpt-5")) {
1408-
if (small.modelID.includes("5.1")) {
1409-
options["reasoningEffort"] = "low"
1410-
} else {
1411-
options["reasoningEffort"] = "minimal"
1412-
}
1413-
}
1414-
if (small.providerID === "google") {
1415-
options["thinkingConfig"] = {
1416-
thinkingBudget: 0,
1417-
}
1418-
}
1401+
const small = await Provider.getSmallModel(input.providerID)
1402+
const options = pipe(
1403+
{},
1404+
mergeDeep(ProviderTransform.options(small.providerID, small.modelID, small.npm ?? "", input.session.id)),
1405+
mergeDeep(ProviderTransform.smallOptions({ providerID: small.providerID, modelID: small.modelID })),
1406+
mergeDeep(small.info.options),
1407+
)
14191408
await generateText({
14201409
maxOutputTokens: small.info.reasoning ? 1500 : 20,
14211410
providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options),
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
You are a helpful AI assistant tasked with summarizing conversations.
2+
3+
When asked to summarize, provide a detailed but concise summary of the conversation.
4+
Focus on information that would be helpful for continuing the conversation, including:
5+
- What was done
6+
- What is currently being worked on
7+
- Which files are being modified
8+
- What needs to be done next
9+
10+
Your summary should be comprehensive enough to provide context but concise enough to be quickly understood.

packages/opencode/src/session/prompt/summarize-turn.txt

Lines changed: 0 additions & 5 deletions
This file was deleted.
Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,4 @@
1-
You are a helpful AI assistant tasked with summarizing conversations.
2-
3-
When asked to summarize, provide a detailed but concise summary of the conversation.
4-
Focus on information that would be helpful for continuing the conversation, including:
5-
- What was done
6-
- What is currently being worked on
7-
- Which files are being modified
8-
- What needs to be done next
9-
10-
Your summary should be comprehensive enough to provide context but concise enough to be quickly understood.
1+
Summarize the following conversation into 2 sentences MAX explaining what the
2+
assistant did and why.
3+
Do not explain the user's input.
4+
Do not speak in the third person about the assistant.

packages/opencode/src/session/summary.ts

Lines changed: 24 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import path from "path"
1313
import { Instance } from "@/project/instance"
1414
import { Storage } from "@/storage/storage"
1515
import { Bus } from "@/bus"
16+
import { mergeDeep, pipe } from "remeda"
1617

1718
export namespace SessionSummary {
1819
const log = Log.create({ service: "session.summary" })
@@ -73,13 +74,18 @@ export namespace SessionSummary {
7374

7475
const assistantMsg = messages.find((m) => m.info.role === "assistant")!.info as MessageV2.Assistant
7576
const small = await Provider.getSmallModel(assistantMsg.providerID)
76-
if (!small) return
77+
const options = pipe(
78+
{},
79+
mergeDeep(ProviderTransform.options(small.providerID, small.modelID, small.npm ?? "", assistantMsg.sessionID)),
80+
mergeDeep(ProviderTransform.smallOptions({ providerID: small.providerID, modelID: small.modelID })),
81+
mergeDeep(small.info.options),
82+
)
7783

7884
const textPart = msgWithParts.parts.find((p) => p.type === "text" && !p.synthetic) as MessageV2.TextPart
7985
if (textPart && !userMsg.summary?.title) {
8086
const result = await generateText({
8187
maxOutputTokens: small.info.reasoning ? 1500 : 20,
82-
providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, {}),
88+
providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options),
8389
messages: [
8490
...SystemPrompt.title(small.providerID).map(
8591
(x): ModelMessage => ({
@@ -115,18 +121,28 @@ export namespace SessionSummary {
115121
.findLast((m) => m.info.role === "assistant")
116122
?.parts.findLast((p) => p.type === "text")?.text
117123
if (!summary || diffs.length > 0) {
124+
for (const msg of messages) {
125+
for (const part of msg.parts) {
126+
if (part.type === "tool" && part.state.status === "completed") {
127+
part.state.output = "[TOOL OUTPUT PRUNED]"
128+
}
129+
}
130+
}
118131
const result = await generateText({
119132
model: small.language,
120133
maxOutputTokens: 100,
134+
providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options),
121135
messages: [
136+
...SystemPrompt.summarize(small.providerID).map(
137+
(x): ModelMessage => ({
138+
role: "system",
139+
content: x,
140+
}),
141+
),
142+
...MessageV2.toModelMessage(messages),
122143
{
123144
role: "user",
124-
content: `
125-
Summarize the following conversation into 2 sentences MAX explaining what the assistant did and why. Do not explain the user's input. Do not speak in the third person about the assistant.
126-
<conversation>
127-
${JSON.stringify(MessageV2.toModelMessage(messages))}
128-
</conversation>
129-
`,
145+
content: `Summarize the above conversation according to your system prompts.`,
130146
},
131147
],
132148
headers: small.info.headers,

packages/opencode/src/session/system.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import PROMPT_POLARIS from "./prompt/polaris.txt"
1313
import PROMPT_BEAST from "./prompt/beast.txt"
1414
import PROMPT_GEMINI from "./prompt/gemini.txt"
1515
import PROMPT_ANTHROPIC_SPOOF from "./prompt/anthropic_spoof.txt"
16+
import PROMPT_COMPACTION from "./prompt/compaction.txt"
1617
import PROMPT_SUMMARIZE from "./prompt/summarize.txt"
1718
import PROMPT_TITLE from "./prompt/title.txt"
1819
import PROMPT_CODEX from "./prompt/codex.txt"
@@ -116,6 +117,15 @@ export namespace SystemPrompt {
116117
return Promise.all(found).then((result) => result.filter(Boolean))
117118
}
118119

120+
export function compaction(providerID: string) {
121+
switch (providerID) {
122+
case "anthropic":
123+
return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_COMPACTION]
124+
default:
125+
return [PROMPT_COMPACTION]
126+
}
127+
}
128+
119129
export function summarize(providerID: string) {
120130
switch (providerID) {
121131
case "anthropic":

0 commit comments

Comments
 (0)