Commit 552c58e

format instructions for gpt-5 only
1 parent cfa71c2 commit 552c58e

File tree

2 files changed: 24 additions, 24 deletions


src/config.ts

Lines changed: 9 additions & 8 deletions
@@ -20,6 +20,14 @@ export const DEFAULT_RESET_CRON = process.env.DEFAULT_RESET_CRON || '0 0 1 */3 *
 
 export const EMBED_MODEL = process.env.EMBED_MODEL ?? 'text-embedding-small'
 
+export const formatInstructions = `
+Always format responses in plain Markdown.
+Use Markdown headings (#, ##, ###) for structure, even for normal text.
+Use lists, tables, and blockquotes where useful.
+Put math in $$ ... $$ for LaTeX rendering.
+Wrap code in triple backticks with the correct language tag (js, ts, py, etc.) so syntax highlighting and rendering work.
+`
+
 /**
  * name: the acual model name, which is shown to users, configures the model to be used and is also the azure deployment name.
  */
@@ -36,6 +44,7 @@ export const validModels = [
     name: 'gpt-5',
     context: 128_000,
     temperature: 1.0,
+    instructions: formatInstructions,
   },
   {
     name: 'mock',
@@ -55,11 +64,3 @@ export const DEFAULT_MODEL_ON_ENABLE = 'gpt-5'
 
 export const DEFAULT_ASSISTANT_INSTRUCTIONS = '' // 11th August 2025 we decided it should be empty
 export const DEFAULT_MODEL_TEMPERATURE = 0.5
-
-export const formatInstructions = `
-Always format responses in plain Markdown.
-Use Markdown headings (#, ##, ###) for structure, even for normal text.
-Use lists, tables, and blockquotes where useful.
-Put math in $$ ... $$ for LaTeX rendering.
-Wrap code in triple backticks with the correct language tag (js, ts, py, etc.) so syntax highlighting and rendering work.
-`
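
For reference, a minimal sketch of the lookup this config change enables: only entries that define an instructions field contribute format instructions. The abbreviated validModels values and the instructionsFor helper below are illustrative stand-ins, not code from this commit.

// Simplified stand-in for src/config.ts: only the gpt-5 entry defines `instructions`.
const formatInstructions = `
Always format responses in plain Markdown.
Wrap code in triple backticks with the correct language tag.
`

const validModels = [
  { name: 'gpt-5', temperature: 1.0, instructions: formatInstructions },
  { name: 'mock' },
] as const

type ValidModelName = (typeof validModels)[number]['name']

// Hypothetical helper: resolve a model's optional format instructions.
const instructionsFor = (name: ValidModelName): string | undefined => {
  const config = validModels.find((m) => m.name === name)
  return config && 'instructions' in config ? config.instructions : undefined
}

console.log(instructionsFor('gpt-5')) // the Markdown formatting instructions
console.log(instructionsFor('mock'))  // undefined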

src/server/services/langchain/chat.ts

Lines changed: 15 additions & 16 deletions
@@ -4,8 +4,8 @@ import type { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages'
 import type { Runnable } from '@langchain/core/runnables'
 import type { StructuredTool } from '@langchain/core/tools'
 import { concat } from '@langchain/core/utils/stream'
-import { AzureChatOpenAI } from '@langchain/openai'
-import { formatInstructions, ValidModelName, validModels } from '../../../config'
+import { AzureChatOpenAI, ChatOpenAICallOptions } from '@langchain/openai'
+import { ValidModelName, validModels } from '../../../config'
 import type { ChatEvent } from '../../../shared/chat'
 import type { ChatMessage } from '../../../shared/chat'
 import type { ChatToolDef, ChatToolOutput } from '../../../shared/tools'
@@ -16,20 +16,15 @@ import { MockModel } from './MockModel'
 
 type ChatModel = Runnable<BaseLanguageModelInput, AIMessageChunk, BaseChatModelCallOptions>
 
-const getChatModel = (model: ValidModelName, tools: StructuredTool[], temperature: number): ChatModel => {
-  const modelConfig = validModels.find((m) => m.name === model)
-  if (!modelConfig) {
-    throw new Error(`Invalid model: ${model}`)
-  }
-
+const getChatModel = (modelConfig: (typeof validModels)[number], tools: StructuredTool[], temperature: number): ChatModel => {
   const chatModel =
     modelConfig.name === 'mock'
       ? new MockModel({ tools, temperature })
-      : new AzureChatOpenAI({
-          model,
+      : new AzureChatOpenAI<ChatOpenAICallOptions>({
+          model: modelConfig.name,
           azureOpenAIApiKey: AZURE_API_KEY,
           azureOpenAIApiVersion: '2023-05-15',
-          azureOpenAIApiDeploymentName: model, // In Azure, always use the acual model name as the deployment name
+          azureOpenAIApiDeploymentName: modelConfig.name, // In Azure, always use the acual model name as the deployment name
           azureOpenAIApiInstanceName: AZURE_RESOURCE,
           temperature: 'temperature' in modelConfig ? modelConfig.temperature : temperature, // If model config specifies a temperature, use it; otherwise, use the user supplied temperature.
           reasoning: {
@@ -65,20 +60,24 @@ export const streamChat = async ({
 }) => {
   const toolsByName = Object.fromEntries(tools.map((tool) => [tool.name, tool]))
 
-  const chatModel = getChatModel(model, tools, temperature)
+  const modelConfig = validModels.find((m) => m.name === model)
+  if (!modelConfig) {
+    throw new Error(`Invalid model: ${model}`)
+  }
+
+  const chatModel = getChatModel(modelConfig, tools, temperature)
 
   const messages: BaseMessageLike[] = [
-    {
-      role: 'system',
-      content: formatInstructions,
-    },
+    ...('instructions' in modelConfig ? [{ role: 'system', content: modelConfig.instructions }] : []),
     {
       role: 'system',
       content: systemMessage,
     },
     ...chatMessages,
   ]
 
+  console.log('📌 messages', messages)
+
   const result = await chatTurn(chatModel, messages, toolsByName, writeEvent, user)
 
   if (result.toolCalls.length > 0) {
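
The streamChat change relies on a conditional spread: the formatting system message is prepended only when the selected model config defines instructions, otherwise the spread contributes nothing. A minimal sketch of that pattern follows, using simplified stand-in types rather than LangChain's BaseMessageLike or the real validModels; buildMessages is a hypothetical helper, not code from this commit.

// Simplified stand-ins; the real code uses LangChain's BaseMessageLike and the
// inferred element type of validModels.
type ModelConfig =
  | { name: 'gpt-5'; instructions: string }
  | { name: 'mock' }

type Message = { role: 'system' | 'user' | 'assistant'; content: string }

// Hypothetical helper mirroring the inline messages array built in streamChat.
const buildMessages = (modelConfig: ModelConfig, systemMessage: string, chatMessages: Message[]): Message[] => [
  // Spreads one extra system message when the model config defines `instructions`, otherwise nothing.
  ...('instructions' in modelConfig ? [{ role: 'system' as const, content: modelConfig.instructions }] : []),
  { role: 'system', content: systemMessage },
  ...chatMessages,
]

// Only a config that defines `instructions` gets the extra formatting message:
console.log(buildMessages({ name: 'gpt-5', instructions: 'Always format responses in plain Markdown.' }, 'You are a helpful assistant.', []).length) // 2
console.log(buildMessages({ name: 'mock' }, 'You are a helpful assistant.', []).length) // 1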

0 commit comments