Skip to content

Commit 0b0d392

Browse files
authored
feat: better system prompt (#1258)
* refactor(agent): enhance system prompt with runtime context and remove browser injection
* feat(renderer): add smart default maxTokens calculation with 32k cap
  - Add helper function to calculate safe default maxTokens
  - Apply 32k global limit as safety cap
  - Reserve space for thinking budget when reasoning is supported
  - Update both Chat and NewThread modes to use smart defaults
  - Remove hardcoded 8192 threshold logic
  - Add comprehensive tests for the calculation logic
* fix: colada warning
1 parent f2f17ae commit 0b0d392

File tree

8 files changed

+358
-80
lines changed

8 files changed

+358
-80
lines changed

src/main/presenter/agentPresenter/message/messageBuilder.ts

Lines changed: 11 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,11 @@ import { CONVERSATION, ModelConfig, SearchResult, ChatMessage } from '@shared/pr
66
import type { MCPToolDefinition } from '@shared/presenter'
77

88
import { ContentEnricher } from '../../content/contentEnricher'
9-
import { BrowserContextBuilder } from '../../browser/BrowserContextBuilder'
109
import { modelCapabilities } from '../../configPresenter/modelCapabilities'
1110
import { enhanceSystemPromptWithDateTime } from '../utility/promptEnhancer'
1211
import { ToolCallCenter } from '../tool/toolCallCenter'
1312
import { nanoid } from 'nanoid'
13+
1414
import {
1515
addContextMessages,
1616
buildUserMessageContext,
@@ -89,16 +89,12 @@ export async function preparePromptContent({
8989
? '\n\n' + ContentEnricher.enrichUserMessageWithUrlContent(userContent, urlResults)
9090
: ''
9191

92-
const finalSystemPrompt = enhanceSystemPromptWithDateTime(systemPrompt, isImageGeneration)
93-
const agentWorkspacePath = conversation.settings.agentWorkspacePath?.trim() || null
94-
const finalSystemPromptWithWorkspace =
95-
isAgentMode && agentWorkspacePath
96-
? finalSystemPrompt
97-
? `${finalSystemPrompt}\n\nCurrent working directory: ${agentWorkspacePath}`
98-
: `Current working directory: ${agentWorkspacePath}`
99-
: finalSystemPrompt
92+
const finalSystemPrompt = enhanceSystemPromptWithDateTime(systemPrompt, {
93+
isImageGeneration,
94+
isAgentMode,
95+
agentWorkspacePath: conversation.settings.agentWorkspacePath?.trim() || null
96+
})
10097

101-
let browserContextPrompt = ''
10298
const { providerId, modelId } = conversation.settings
10399
const supportsVision = modelCapabilities.supportsVision(providerId, modelId)
104100
let toolDefinitions: MCPToolDefinition[] = []
@@ -110,36 +106,16 @@ export async function preparePromptContent({
110106
enabledMcpTools,
111107
chatMode,
112108
supportsVision,
113-
agentWorkspacePath
109+
agentWorkspacePath: conversation.settings.agentWorkspacePath?.trim() || null
114110
})
115111
} catch (error) {
116112
console.warn('AgentPresenter: Failed to load tool definitions', error)
117113
toolDefinitions = []
118114
}
119115
}
120116

121-
if (!isImageGeneration && isAgentMode) {
122-
try {
123-
const browserContext = await presenter.yoBrowserPresenter.getBrowserContext()
124-
browserContextPrompt = BrowserContextBuilder.buildSystemPrompt(
125-
browserContext.tabs,
126-
browserContext.activeTabId
127-
)
128-
} catch (error) {
129-
console.warn('AgentPresenter: Failed to load Yo Browser context/tools', error)
130-
}
131-
}
132-
133-
const finalSystemPromptWithBrowser = browserContextPrompt
134-
? finalSystemPromptWithWorkspace
135-
? `${finalSystemPromptWithWorkspace}\n${browserContextPrompt}`
136-
: browserContextPrompt
137-
: finalSystemPromptWithWorkspace
138-
139117
const systemPromptTokens =
140-
!isImageGeneration && finalSystemPromptWithBrowser
141-
? approximateTokenSize(finalSystemPromptWithBrowser)
142-
: 0
118+
!isImageGeneration && finalSystemPrompt ? approximateTokenSize(finalSystemPrompt) : 0
143119
const userMessageTokens = approximateTokenSize(userContent + enrichedUserMessage)
144120
const toolDefinitionsTokens = toolDefinitions.reduce((acc, tool) => {
145121
return acc + approximateTokenSize(JSON.stringify(tool))
@@ -158,7 +134,7 @@ export async function preparePromptContent({
158134

159135
const formattedMessages = formatMessagesForCompletion(
160136
selectedContextMessages,
161-
isImageGeneration ? '' : finalSystemPromptWithBrowser,
137+
isImageGeneration ? '' : finalSystemPrompt,
162138
artifacts,
163139
userContent,
164140
enrichedUserMessage,
@@ -214,7 +190,7 @@ export async function buildContinueToolCallContext({
214190
const formattedMessages: ChatMessage[] = []
215191

216192
if (systemPrompt) {
217-
const finalSystemPrompt = enhanceSystemPromptWithDateTime(systemPrompt)
193+
const finalSystemPrompt = enhanceSystemPromptWithDateTime(systemPrompt, {})
218194
formattedMessages.push({
219195
role: 'system',
220196
content: finalSystemPrompt
@@ -248,7 +224,7 @@ export async function buildPostToolExecutionContext({
248224
const supportsFunctionCall = Boolean(modelConfig?.functionCall)
249225

250226
if (systemPrompt) {
251-
const finalSystemPrompt = enhanceSystemPromptWithDateTime(systemPrompt)
227+
const finalSystemPrompt = enhanceSystemPromptWithDateTime(systemPrompt, {})
252228
formattedMessages.push({
253229
role: 'system',
254230
content: finalSystemPrompt
Lines changed: 49 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,7 @@
1-
export function enhanceSystemPromptWithDateTime(
2-
systemPrompt: string,
3-
isImageGeneration: boolean = false
4-
): string {
5-
if (isImageGeneration || !systemPrompt || !systemPrompt.trim()) {
6-
return systemPrompt
7-
}
1+
type PlatformName = 'macOS' | 'Windows' | 'Linux' | 'Unknown'
82

9-
const currentDateTime = new Date().toLocaleString('en-US', {
3+
function formatCurrentDateTime(): string {
4+
return new Date().toLocaleString('en-US', {
105
year: 'numeric',
116
month: 'long',
127
day: 'numeric',
@@ -16,6 +11,51 @@ export function enhanceSystemPromptWithDateTime(
1611
timeZoneName: 'short',
1712
hour12: false
1813
})
14+
}
15+
16+
function formatPlatformName(platform: NodeJS.Platform): PlatformName {
17+
if (platform === 'darwin') return 'macOS'
18+
if (platform === 'win32') return 'Windows'
19+
if (platform === 'linux') return 'Linux'
20+
return 'Unknown'
21+
}
22+
23+
interface EnhanceOptions {
24+
isImageGeneration?: boolean
25+
isAgentMode?: boolean
26+
agentWorkspacePath?: string | null
27+
platform?: NodeJS.Platform
28+
}
29+
30+
export function enhanceSystemPromptWithDateTime(
31+
systemPrompt: string,
32+
options: EnhanceOptions = {}
33+
): string {
34+
const {
35+
isImageGeneration = false,
36+
isAgentMode = false,
37+
agentWorkspacePath,
38+
platform = process.platform
39+
} = options
40+
41+
if (isImageGeneration) return systemPrompt
42+
43+
const trimmedPrompt = systemPrompt?.trim() ?? ''
44+
45+
const runtimeLines: string[] = [`## Runtime Context - Today is ${formatCurrentDateTime()}`]
46+
const platformName = formatPlatformName(platform)
47+
if (platformName !== 'Unknown') {
48+
runtimeLines.push(`- You are running on ${platformName}`)
49+
}
50+
51+
const normalizedWorkspace = agentWorkspacePath?.trim()
52+
if (isAgentMode && normalizedWorkspace) {
53+
runtimeLines.push(
54+
`- Current working directory: ${normalizedWorkspace} (All file operations and shell commands will be executed relative to this directory)`
55+
)
56+
}
57+
58+
const runtimeBlock = runtimeLines.join('\n')
1959

20-
return `${systemPrompt}\nToday is ${currentDateTime}`
60+
return trimmedPrompt ? `${trimmedPrompt}\n${runtimeBlock}` : runtimeBlock
2161
}

src/main/presenter/tabPresenter.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,6 +217,9 @@ export class TabPresenter implements ITabPresenter {
217217
}
218218

219219
// DevTools 不再自动打开(避免在 macOS 全屏时产生额外窗口/空间的异常体验)
220+
if (is.dev) {
221+
view.webContents.openDevTools({ mode: 'detach' })
222+
}
220223

221224
// 存储标签信息
222225
const tabId = view.webContents.id

src/renderer/src/components/NewThread.vue

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ import { CONFIG_EVENTS } from '@/events'
123123
import { useModelStore } from '@/stores/modelStore'
124124
import { useUiSettingsStore } from '@/stores/uiSettingsStore'
125125
import { useChatMode, type ChatMode } from '@/components/chat-input/composables/useChatMode'
126+
import { calculateSafeDefaultMaxTokens, GLOBAL_OUTPUT_TOKEN_MAX } from '@/utils/maxOutputTokens'
126127
127128
const configPresenter = usePresenter('configPresenter')
128129
const themeStore = useThemeStore()
@@ -155,8 +156,8 @@ const activeModel = ref({
155156
const temperature = ref(0.6)
156157
const contextLength = ref(16384)
157158
const contextLengthLimit = ref(16384)
158-
const maxTokens = ref(4096)
159-
const maxTokensLimit = ref(4096)
159+
const maxTokens = ref(GLOBAL_OUTPUT_TOKEN_MAX)
160+
const maxTokensLimit = ref(GLOBAL_OUTPUT_TOKEN_MAX)
160161
const systemPrompt = ref('')
161162
const artifacts = ref(uiSettingsStore.artifactsEffectEnabled ? 1 : 0)
162163
const thinkingBudget = ref<number | undefined>(undefined)
@@ -200,9 +201,20 @@ watch(
200201
)
201202
temperature.value = config.temperature ?? 0.7
202203
contextLength.value = config.contextLength
203-
maxTokens.value = config.maxTokens
204204
contextLengthLimit.value = config.contextLength
205205
maxTokensLimit.value = config.maxTokens
206+
207+
const safeDefaultMaxTokens = calculateSafeDefaultMaxTokens({
208+
modelMaxTokens: config.maxTokens || GLOBAL_OUTPUT_TOKEN_MAX,
209+
thinkingBudget: config.thinkingBudget,
210+
reasoningSupported: Boolean(config.reasoning)
211+
})
212+
213+
maxTokens.value = safeDefaultMaxTokens
214+
215+
if (maxTokens.value > (config.maxTokens || GLOBAL_OUTPUT_TOKEN_MAX)) {
216+
maxTokens.value = config.maxTokens || GLOBAL_OUTPUT_TOKEN_MAX
217+
}
206218
thinkingBudget.value = config.thinkingBudget
207219
enableSearch.value = config.enableSearch
208220
forcedSearch.value = config.forcedSearch

src/renderer/src/components/chat-input/composables/usePromptInputConfig.ts

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,9 @@ import { usePresenter } from '@/composables/usePresenter'
1313
import { useChatStore } from '@/stores/chat'
1414
import { useModelStore } from '@/stores/modelStore'
1515

16+
// === Utils ===
17+
import { calculateSafeDefaultMaxTokens, GLOBAL_OUTPUT_TOKEN_MAX } from '@/utils/maxOutputTokens'
18+
1619
/**
1720
* Composable for managing model configuration and synchronization with store
1821
* Handles bidirectional sync between local config refs and chatStore
@@ -105,13 +108,22 @@ export function usePromptInputConfig() {
105108
configContextLength.value = Math.max(2048, config.contextLength)
106109
}
107110

108-
const maxTokensMax = !config.maxTokens || config.maxTokens < 8192 ? 8192 : config.maxTokens
109-
if (configMaxTokens.value > maxTokensMax) {
110-
configMaxTokens.value = maxTokensMax
111-
} else if (configMaxTokens.value < 1024) {
111+
const safeDefaultMaxTokens = calculateSafeDefaultMaxTokens({
112+
modelMaxTokens: config.maxTokens || GLOBAL_OUTPUT_TOKEN_MAX,
113+
thinkingBudget: config.thinkingBudget,
114+
reasoningSupported: Boolean(config.reasoning)
115+
})
116+
117+
configMaxTokens.value = safeDefaultMaxTokens
118+
119+
if (configMaxTokens.value < 1024) {
112120
configMaxTokens.value = 1024
113121
}
114122

123+
if (configMaxTokensLimit.value && configMaxTokens.value > configMaxTokensLimit.value) {
124+
configMaxTokens.value = configMaxTokensLimit.value
125+
}
126+
115127
if (configTemperature.value === undefined || configTemperature.value === null) {
116128
configTemperature.value = config.temperature ?? 0.6
117129
}

src/renderer/src/stores/modelStore.ts

Lines changed: 58 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
import { ref } from 'vue'
1+
import { computed, type ComputedRef, ref } from 'vue'
22
import { defineStore } from 'pinia'
3-
import { useQuery, useQueryCache } from '@pinia/colada'
3+
import { useQueryCache, type DataState, type EntryKey, type UseQueryEntry } from '@pinia/colada'
44
import { useThrottleFn } from '@vueuse/core'
55
import type { MODEL_META, RENDERER_MODEL_META, ModelConfig } from '@shared/presenter'
66
import { ModelType } from '@shared/model'
@@ -15,6 +15,13 @@ const PROVIDER_MODELS_KEY = (providerId: string) => ['model-store', 'provider-mo
1515
const CUSTOM_MODELS_KEY = (providerId: string) => ['model-store', 'custom-models', providerId]
1616
const ENABLED_MODELS_KEY = (providerId: string) => ['model-store', 'enabled-models', providerId]
1717

18+
type ModelQueryHandle<TData> = {
19+
entry: UseQueryEntry<TData, unknown, TData | undefined>
20+
data: ComputedRef<TData | undefined>
21+
refresh: (throwOnError?: boolean) => Promise<DataState<TData, unknown, TData | undefined>>
22+
refetch: (throwOnError?: boolean) => Promise<DataState<TData, unknown, TData | undefined>>
23+
}
24+
1825
export const useModelStore = defineStore('model', () => {
1926
const configP = usePresenter('configPresenter')
2027
const llmP = usePresenter('llmproviderPresenter')
@@ -27,8 +34,9 @@ export const useModelStore = defineStore('model', () => {
2734
const customModels = ref<{ providerId: string; models: RENDERER_MODEL_META[] }[]>([])
2835
const listenersRegistered = ref(false)
2936

30-
const providerModelQueries = new Map<string, ReturnType<typeof getProviderModelsQuery>>()
31-
const customModelQueries = new Map<string, ReturnType<typeof getCustomModelsQuery>>()
37+
const providerModelQueries = new Map<string, ModelQueryHandle<MODEL_META[]>>()
38+
const customModelQueries = new Map<string, ModelQueryHandle<MODEL_META[]>>()
39+
const enabledModelQueries = new Map<string, ModelQueryHandle<RENDERER_MODEL_META[]>>()
3240
const queryCache = useQueryCache()
3341
const isAgentProvider = async (providerId: string): Promise<boolean> => {
3442
try {
@@ -115,37 +123,57 @@ export const useModelStore = defineStore('model', () => {
115123
type: (model.type ?? ModelType.Chat) as ModelType
116124
})
117125

118-
const getProviderModelsQuery = (providerId: string) => {
119-
if (!providerModelQueries.has(providerId)) {
120-
providerModelQueries.set(
121-
providerId,
122-
useQuery<MODEL_META[]>({
123-
key: () => PROVIDER_MODELS_KEY(providerId),
124-
staleTime: 30_000,
125-
query: async () => configP.getProviderModels(providerId)
126-
})
127-
)
126+
const createQueryHandle = <TData>(
127+
entry: UseQueryEntry<TData, unknown, TData | undefined>
128+
): ModelQueryHandle<TData> => {
129+
const data = computed(() => entry.state.value.data as TData | undefined)
130+
const refresh = (throwOnError?: boolean) => {
131+
const promise = queryCache.refresh(entry)
132+
return throwOnError ? promise : promise.catch(() => entry.state.value)
133+
}
134+
const refetch = (throwOnError?: boolean) => {
135+
const promise = queryCache.fetch(entry)
136+
return throwOnError ? promise : promise.catch(() => entry.state.value)
128137
}
129-
return providerModelQueries.get(providerId)!
138+
return { entry, data, refresh, refetch }
130139
}
131140

132-
const getCustomModelsQuery = (providerId: string) => {
133-
if (!customModelQueries.has(providerId)) {
134-
customModelQueries.set(
135-
providerId,
136-
useQuery<MODEL_META[]>({
137-
key: () => CUSTOM_MODELS_KEY(providerId),
138-
staleTime: 30_000,
139-
query: async () => configP.getCustomModels(providerId)
140-
})
141-
)
141+
const ensureQueryHandle = <TData>(
142+
map: Map<string, ModelQueryHandle<TData>>,
143+
providerId: string,
144+
options: {
145+
key: EntryKey
146+
staleTime: number
147+
query: () => Promise<TData>
142148
}
143-
return customModelQueries.get(providerId)!
149+
) => {
150+
const entry = queryCache.ensure<TData>(options)
151+
const existing = map.get(providerId)
152+
if (existing?.entry === entry) return existing
153+
const handle = createQueryHandle(entry)
154+
map.set(providerId, handle)
155+
return handle
144156
}
145157

146-
const getEnabledModelsQuery = (providerId: string) =>
147-
useQuery<RENDERER_MODEL_META[]>({
148-
key: () => ENABLED_MODELS_KEY(providerId),
158+
const getProviderModelsQuery = (providerId: string) => {
159+
return ensureQueryHandle(providerModelQueries, providerId, {
160+
key: PROVIDER_MODELS_KEY(providerId),
161+
staleTime: 30_000,
162+
query: async () => configP.getProviderModels(providerId)
163+
})
164+
}
165+
166+
const getCustomModelsQuery = (providerId: string) => {
167+
return ensureQueryHandle(customModelQueries, providerId, {
168+
key: CUSTOM_MODELS_KEY(providerId),
169+
staleTime: 30_000,
170+
query: async () => configP.getCustomModels(providerId)
171+
})
172+
}
173+
174+
const getEnabledModelsQuery = (providerId: string) => {
175+
return ensureQueryHandle(enabledModelQueries, providerId, {
176+
key: ENABLED_MODELS_KEY(providerId),
149177
staleTime: 30_000,
150178
query: async () => {
151179
const [providerModels, customModelsList] = await Promise.all([
@@ -160,6 +188,7 @@ export const useModelStore = defineStore('model', () => {
160188
.map((model) => ({ ...normalizeRendererModel(model, providerId), enabled: true }))
161189
}
162190
})
191+
}
163192

164193
const applyUserDefinedModelConfig = async (
165194
model: RENDERER_MODEL_META,

0 commit comments

Comments (0)